threads
listlengths
1
2.99k
[ { "msg_contents": "Hi,\n\nPer Coverity.\n3 out-of-bounds at function AppendJumble.\n\nThey have the face, smell and color of typo.\nAnd we usually increment the character count after a memcpy.\n\nCoverity no longer complained after the patch.\n\nThoughts?\n\nregards,\nRanier Vilela", "msg_date": "Mon, 21 Jun 2021 14:07:59 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Out-of-bounds (src/backend/utils/misc/queryjumble.c)" }, { "msg_contents": "Ranier Vilela <ranier.vf@gmail.com> writes:\n> Per Coverity.\n> 3 out-of-bounds at function AppendJumble.\n\n> They have the face, smell and color of typo.\n> And we usually increment the character count after a memcpy.\n\n> Coverity no longer complained after the patch.\n\n> Thoughts?\n\nThis patch is incorrect on its face, as you would know if you'd\nspent even a couple minutes absorbing the comment in that function.\n\nI wonder about Coverity here ... independently of whether the\nhash-accumulation logic does what we want, it looks to me like\nthe proposed change doesn't so much remove a buffer overrun as\ncreate one. It would break the property jumble_len < JUMBLE_SIZE\nthat the subsequent lines rely on.\n\nPlease stop sending us random patches and expecting us to sort\nout which ones are valid. You're rapidly approaching the status\nof \"boy who cried wolf too many times\".\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 21 Jun 2021 13:19:19 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Out-of-bounds (src/backend/utils/misc/queryjumble.c)" } ]
[ { "msg_contents": "The following generates an assertion failure. Quick testing with start \nand stop as well as the core dump shows it’s failing on the execution of \n`schema_name := schema_name(i)` immediately after COMMIT, because \nthere’s no active snapshot. On a build without asserts I get a failure \nin GetActiveSnapshot() (second stack trace). This works fine on \n12_STABLE, but fails on 13_STABLE and HEAD.\n\n\nCREATE OR REPLACE FUNCTION public.schema_name(i integer)\n  RETURNS text\n  LANGUAGE sql\n  IMMUTABLE\nAS $function$\nSELECT 'test_' || trim(to_char(i, '000000'))\n$function$;\n\nCREATE OR REPLACE PROCEDURE public.build_schema(start integer, stop \ninteger, commit_interval integer DEFAULT 10, do_insert boolean DEFAULT true)\n  LANGUAGE plpgsql\nAS $procedure$\nDECLARE\n     schema_name text;\nBEGIN\nFOR i IN start .. stop LOOP\n     schema_name := schema_name(i);\n     IF i % commit_interval = 0 THEN\n         --RAISE NOTICE 'COMMIT CREATE step %', i;\n         COMMIT;\n     END IF;\nEND LOOP;\nEND$procedure$;\n\n\nCALL build_schema(1,11);\n\n\n<assert failure>\n\nCore file '/cores/core.1912' (x86_64) was loaded.\n\n(lldb) bt\n* thread #1, stop reason = signal SIGSTOP\n   * frame #0: 0x00007fff6c3ae33a libsystem_kernel.dylib`__pthread_kill + 10\n     frame #1: 0x00007fff6c46ae60 libsystem_pthread.dylib`pthread_kill + 430\n     frame #2: 0x00007fff6c335808 libsystem_c.dylib`abort + 120\n     frame #3: 0x000000010af1af6d \npostgres`ExceptionalCondition(conditionName=\"ActiveSnapshotSet()\", \nerrorType=\"FailedAssertion\", fileName=\"postgres.c\", lineNumber=867) at \nassert.c:67:2\n     frame #4: 0x000000010ad3aeb3 \npostgres`pg_plan_query(querytree=0x00007ff663023848, \nquery_string=\"\\nSELECT 'test_' || trim(to_char(i, '000000'))\\n\", \ncursorOptions=256, boundParams=0x0000000000000000) at postgres.c:867:2\n     frame #5: 0x000000010aad9059 \npostgres`init_execution_state(queryTree_list=0x00007ff663024208, \nfcache=0x00007ff663022720, 
lazyEvalOK=true) at functions.c:513:12\n     frame #6: 0x000000010aad6dec \npostgres`init_sql_fcache(fcinfo=0x00007ff663035918, collation=0, \nlazyEvalOK=true) at functions.c:787:23\n     frame #7: 0x000000010aad5ffa \npostgres`fmgr_sql(fcinfo=0x00007ff663035918) at functions.c:1070:3\n     frame #8: 0x000000010aaaf660 \npostgres`ExecInterpExpr(state=0x00007ff663035828, \necontext=0x00007ff663035738, isnull=0x00007ffee5459bbf) at \nexecExprInterp.c:680:8\n     frame #9: 0x000000010aaae9b7 \npostgres`ExecInterpExprStillValid(state=0x00007ff663035828, \necontext=0x00007ff663035738, isNull=0x00007ffee5459bbf) at \nexecExprInterp.c:1807:9\n     frame #10: 0x00000001163e5b5a \nplpgsql.so`ExecEvalExpr(state=0x00007ff663035828, \necontext=0x00007ff663035738, isNull=0x00007ffee5459bbf) at executor.h:303:9\n     frame #11: 0x00000001163e4fe0 \nplpgsql.so`exec_eval_simple_expr(estate=0x00007ffee545a080, \nexpr=0x00007ff662032db0, result=0x00007ffee5459b68, \nisNull=0x00007ffee5459bbf, rettype=0x00007ffee5459bb8, \nrettypmod=0x00007ffee5459bb4) at pl_exec.c:6328:12\n     frame #12: 0x00000001163e4887 \nplpgsql.so`exec_eval_expr(estate=0x00007ffee545a080, \nexpr=0x00007ff662032db0, isNull=0x00007ffee5459bbf, \nrettype=0x00007ffee5459bb8, rettypmod=0x00007ffee5459bb4) at \npl_exec.c:5833:6\n     frame #13: 0x00000001163e30c2 \nplpgsql.so`exec_assign_expr(estate=0x00007ffee545a080, \ntarget=0x00007ff66300e4d0, expr=0x00007ff662032db0) at pl_exec.c:4973:10\n     frame #14: 0x00000001163dc407 \nplpgsql.so`exec_stmt_assign(estate=0x00007ffee545a080, \nstmt=0x00007ff662032e80) at pl_exec.c:2112:2\n     frame #15: 0x00000001163d9de2 \nplpgsql.so`exec_stmt(estate=0x00007ffee545a080, stmt=0x00007ff662032e80) \nat pl_exec.c:1980:9\n     frame #16: 0x00000001163e3387 \nplpgsql.so`exec_stmts(estate=0x00007ffee545a080, \nstmts=0x00007ff662032eb8) at pl_exec.c:1943:14\n     frame #17: 0x00000001163ddd7c \nplpgsql.so`exec_stmt_fori(estate=0x00007ffee545a080, \nstmt=0x00007ff662032ca8) at 
pl_exec.c:2783:8\n     frame #18: 0x00000001163d9ea2 \nplpgsql.so`exec_stmt(estate=0x00007ffee545a080, stmt=0x00007ff662032ca8) \nat pl_exec.c:2012:9\n     frame #19: 0x00000001163e3387 \nplpgsql.so`exec_stmts(estate=0x00007ffee545a080, \nstmts=0x00007ff6620328d0) at pl_exec.c:1943:14\n     frame #20: 0x00000001163dc20d \nplpgsql.so`exec_stmt_block(estate=0x00007ffee545a080, \nblock=0x00007ff6620231c8) at pl_exec.c:1884:8\n     frame #21: 0x00000001163d9dca \nplpgsql.so`exec_stmt(estate=0x00007ffee545a080, stmt=0x00007ff6620231c8) \nat pl_exec.c:1976:9\n     frame #22: 0x00000001163d8393 \nplpgsql.so`plpgsql_exec_function(func=0x00007ff666055738, \nfcinfo=0x00007ffee545a478, simple_eval_estate=0x0000000000000000, \nsimple_eval_resowner=0x0000000000000000, atomic=false) at pl_exec.c:610:7\n     frame #23: 0x00000001163f6c4a \nplpgsql.so`plpgsql_call_handler(fcinfo=0x00007ffee545a478) at \npl_handler.c:265:13\n     frame #24: 0x000000010aa22232 \npostgres`ExecuteCallStmt(stmt=0x00007ff662010320, \nparams=0x0000000000000000, atomic=false, dest=0x00007ff662010ba0) at \nfunctioncmds.c:2232:11\n     frame #25: 0x000000010ad454af \npostgres`standard_ProcessUtility(pstmt=0x00007ff6620103e8, \nqueryString=\"call build_schema (1,11);\", \ncontext=PROCESS_UTILITY_TOPLEVEL, params=0x0000000000000000, \nqueryEnv=0x0000000000000000, dest=0x00007ff662010ba0, \nqc=0x00007ffee545b070) at utility.c:817:4\n     frame #26: 0x000000010ad44c82 \npostgres`ProcessUtility(pstmt=0x00007ff6620103e8, queryString=\"call \nbuild_schema (1,11);\", context=PROCESS_UTILITY_TOPLEVEL, \nparams=0x0000000000000000, queryEnv=0x0000000000000000, \ndest=0x00007ff662010ba0, qc=0x00007ffee545b070) at utility.c:524:3\n     frame #27: 0x000000010ad4443e \npostgres`PortalRunUtility(portal=0x00007ff66282f320, \npstmt=0x00007ff6620103e8, isTopLevel=true, setHoldSnapshot=false, \ndest=0x00007ff662010ba0, qc=0x00007ffee545b070) at pquery.c:1145:2\n     frame #28: 0x000000010ad4359d 
\npostgres`PortalRunMulti(portal=0x00007ff66282f320, isTopLevel=true, \nsetHoldSnapshot=false, dest=0x00007ff662010ba0, \naltdest=0x00007ff662010ba0, qc=0x00007ffee545b070) at pquery.c:1301:5\n     frame #29: 0x000000010ad42b2e \npostgres`PortalRun(portal=0x00007ff66282f320, count=9223372036854775807, \nisTopLevel=true, run_once=true, dest=0x00007ff662010ba0, \naltdest=0x00007ff662010ba0, qc=0x00007ffee545b070) at pquery.c:786:5\n     frame #30: 0x000000010ad3e0c5 \npostgres`exec_simple_query(query_string=\"call build_schema (1,11);\") at \npostgres.c:1239:10\n     frame #31: 0x000000010ad3d26d postgres`PostgresMain(argc=1, \nargv=0x00007ff66280f670, dbname=\"nasbyj\", username=\"nasbyj\") at \npostgres.c:4339:7\n     frame #32: 0x000000010ac6752a \npostgres`BackendRun(port=0x00007ff664404080) at postmaster.c:4526:2\n     frame #33: 0x000000010ac66979 \npostgres`BackendStartup(port=0x00007ff664404080) at postmaster.c:4210:3\n     frame #34: 0x000000010ac658fd postgres`ServerLoop at \npostmaster.c:1739:7\n     frame #35: 0x000000010ac633e4 postgres`PostmasterMain(argc=1, \nargv=0x00007ff661c06cc0) at postmaster.c:1412:11\n     frame #36: 0x000000010ab45679 postgres`main(argc=1, \nargv=0x00007ff661c06cc0) at main.c:210:3\n     frame #37: 0x00007fff6c266cc9 libdyld.dylib`start + 1\n     frame #38: 0x00007fff6c266cc9 libdyld.dylib`start + 1\n(lldb) fr 4\ninvalid command 'frame 4'.\n(lldb) fr s 4\nframe #4: 0x000000010ad3aeb3 \npostgres`pg_plan_query(querytree=0x00007ff663023848, \nquery_string=\"\\nSELECT 'test_' || trim(to_char(i, '000000'))\\n\", \ncursorOptions=256, boundParams=0x0000000000000000) at postgres.c:867:2\n    864             return NULL;\n    865\n    866         /* Planner must have a snapshot in case it calls \nuser-defined functions. */\n-> 867         Assert(ActiveSnapshotSet());\n    868\n    869         TRACE_POSTGRESQL_QUERY_PLAN_START();\n    870\n(lldb)\n\n\n... 
non-assert build ...\n\n\n(lldb) bt\npostgres was compiled with optimization - stepping may behave oddly; \nvariables may not be available.\n* thread #1, stop reason = signal SIGSTOP\n   * frame #0: 0x0000000101d207db postgres`GetActiveSnapshot at \nsnapmgr.c:845:25 [opt]\n     frame #1: 0x0000000101a83f25 postgres`fmgr_sql [inlined] \npostquel_start(es=0x00007fd19789c5a8, fcache=<unavailable>) at \nfunctions.c:832:9 [opt]\n     frame #2: 0x0000000101a83ec6 \npostgres`fmgr_sql(fcinfo=<unavailable>) at functions.c:1161 [opt]\n     frame #3: 0x0000000101a6fae1 \npostgres`ExecInterpExpr(state=<unavailable>, econtext=<unavailable>, \nisnull=0x00007ffeee304a57) at execExprInterp.c:680:8 [opt]\n     frame #4: 0x00000001028ddc10 plpgsql.so`exec_eval_expr [inlined] \nExecEvalExpr(state=<unavailable>, econtext=0x00007fd19a818128, \nisNull=0x00007ffeee304a57) at executor.h:303:9 [opt]\n     frame #5: 0x00000001028ddc06 plpgsql.so`exec_eval_expr [inlined] \nexec_eval_simple_expr(estate=0x00007ffeee304f18, \nexpr=0x00007fd1980268c0, isNull=<unavailable>, rettype=<unavailable>, \nrettypmod=0x00007fd19a80fe00) at pl_exec.c:6328 [opt]\n     frame #6: 0x00000001028ddb2a \nplpgsql.so`exec_eval_expr(estate=0x00007ffeee304f18, \nexpr=0x00007fd1980268c0, isNull=<unavailable>, rettype=<unavailable>, \nrettypmod=0x00007fd19a80fe00) at pl_exec.c:5833 [opt]\n     frame #7: 0x00000001028dd258 \nplpgsql.so`exec_assign_expr(estate=0x00007ffeee304f18, \ntarget=0x00007fd19a810120, expr=0x00007fd1980268c0) at pl_exec.c:4973:10 \n[opt]\n     frame #8: 0x00000001028d86a2 plpgsql.so`exec_stmt [inlined] \nexec_stmt_assign(estate=0x00007ffeee304f18, stmt=0x00007fd198026980) at \npl_exec.c:2112:2 [opt]\n     frame #9: 0x00000001028d868a \nplpgsql.so`exec_stmt(estate=0x00007ffeee304f18, stmt=0x00007fd198026980) \nat pl_exec.c:1980 [opt]\n     frame #10: 0x00000001028d9941 plpgsql.so`exec_stmt at \npl_exec.c:1943:14 [opt]\n     frame #11: 0x00000001028d9914 plpgsql.so`exec_stmt [inlined] 
\nexec_stmt_fori(estate=0x00007ffeee304f18, stmt=0x00007fd1980267d8) at \npl_exec.c:2783 [opt]\n     frame #12: 0x00000001028d98a9 \nplpgsql.so`exec_stmt(estate=<unavailable>, stmt=<unavailable>) at \npl_exec.c:2012 [opt]\n     frame #13: 0x00000001028dbf20 plpgsql.so`exec_stmt_block at \npl_exec.c:1943:14 [opt]\n     frame #14: 0x00000001028dbefb \nplpgsql.so`exec_stmt_block(estate=0x00007ffeee304f18, \nblock=0x00007fd198026c20) at pl_exec.c:1884 [opt]\n     frame #15: 0x00000001028d7ec3 \nplpgsql.so`exec_stmt(estate=0x00007ffeee304f18, stmt=0x00007fd198026c20) \nat pl_exec.c:1976:9 [opt]\n     frame #16: 0x00000001028d6f45 \nplpgsql.so`plpgsql_exec_function(func=0x00007fd19785cc60, \nfcinfo=0x00007ffeee3051f0, simple_eval_estate=<unavailable>, \nsimple_eval_resowner=<unavailable>, atomic=<unavailable>) at \npl_exec.c:610:7 [opt]\n     frame #17: 0x00000001028e9122 \nplpgsql.so`plpgsql_call_handler(fcinfo=0x00007ffeee3051f0) at \npl_handler.c:265:13 [opt]\n     frame #18: 0x0000000101a2b5e4 \npostgres`ExecuteCallStmt(stmt=0x00007fd1978102b0, \nparams=0x00007fd19a80f4e8, atomic=<unavailable>, \ndest=0x00007fd1978108d0) at functioncmds.c:2232:11 [opt]\n     frame #19: 0x0000000101bd31f5 \npostgres`standard_ProcessUtility(pstmt=0x00007fd197810360, \nqueryString=\"CALL build_schema(1,11);\", \ncontext=PROCESS_UTILITY_TOPLEVEL, params=0x0000000000000000, \nqueryEnv=0x0000000000000000, dest=0x00007fd1978108d0, \nqc=0x00007ffeee305b80) at utility.c:817:4 [opt]\n     frame #20: 0x0000000101bd2c58 \npostgres`ProcessUtility(pstmt=0x00007fd197810360, \nqueryString=<unavailable>, context=<unavailable>, params=<unavailable>, \nqueryEnv=<unavailable>, dest=<unavailable>, qc=0x00007ffeee305b80) at \nutility.c:524:3 [opt]\n     frame #21: 0x0000000101bd29b1 \npostgres`PortalRunUtility(portal=0x00007fd197838118, \npstmt=0x00007fd197810360, isTopLevel=<unavailable>, \nsetHoldSnapshot=<unavailable>, dest=<unavailable>, \nqc=0x00007ffeee305b80) at pquery.c:1145:2 [opt]\n     frame 
#22: 0x0000000101bd2028 \npostgres`PortalRunMulti(portal=0x00007fd197838118, isTopLevel=true, \nsetHoldSnapshot=false, dest=0x00007fd1978108d0, \naltdest=0x00007fd1978108d0, qc=<unavailable>) at pquery.c:0 [opt]\n     frame #23: 0x0000000101bd1afd \npostgres`PortalRun(portal=0x00007fd197838118, count=9223372036854775807, \nisTopLevel=true, run_once=<unavailable>, dest=0x00007fd1978108d0, \naltdest=0x00007fd1978108d0, qc=0x00007ffeee305b80) at pquery.c:786:5 [opt]\n     frame #24: 0x0000000101bd0d5a \npostgres`exec_simple_query(query_string=\"CALL build_schema(1,11);\") at \npostgres.c:1239:10 [opt]\n     frame #25: 0x0000000101bce608 \npostgres`PostgresMain(argc=<unavailable>, argv=<unavailable>, \ndbname=<unavailable>, username=<unavailable>) at postgres.c:0 [opt]\n     frame #26: 0x0000000101b57e11 \npostgres`BackendRun(port=<unavailable>) at postmaster.c:4526:2 [opt]\n     frame #27: 0x0000000101b57590 postgres`ServerLoop [inlined] \nBackendStartup(port=<unavailable>) at postmaster.c:4210:3 [opt]\n     frame #28: 0x0000000101b5756f postgres`ServerLoop at \npostmaster.c:1739 [opt]\n     frame #29: 0x0000000101b5474a \npostgres`PostmasterMain(argc=<unavailable>, argv=0x00007fd197406cc0) at \npostmaster.c:1412:11 [opt]\n     frame #30: 0x0000000101abf3ef postgres`main(argc=<unavailable>, \nargv=<unavailable>) at main.c:210:3 [opt]\n     frame #31: 0x00007fff6c266cc9 libdyld.dylib`start + 1\n     frame #32: 0x00007fff6c266cc9 libdyld.dylib`start + 1\n(lldb)\n\n\n\n\n\n\n\n\n\nThe following\n generates an assertion\n failure. Quick testing with start and stop as well as the core\n dump shows it’s\n failing on the execution of `schema_name := schema_name(i)`\n immediately after\n COMMIT, because there’s no active snapshot. On a build without\n asserts I get a failure in GetActiveSnapshot() (second stack\n trace). 
This works fine on 12_STABLE, but fails on 13_STABLE and\n HEAD.\n\n\nCREATE OR\n REPLACE FUNCTION public.schema_name(i integer)\n  RETURNS text\n  LANGUAGE sql\n  IMMUTABLE\n AS $function$\n SELECT 'test_' || trim(to_char(i, '000000'))\n $function$;\nCREATE OR\n REPLACE PROCEDURE public.build_schema(start integer, stop\n integer, commit_interval integer DEFAULT 10, do_insert boolean\n DEFAULT true)\n  LANGUAGE plpgsql\n AS $procedure$\n DECLARE\n     schema_name text;\n BEGIN\n FOR i IN start .. stop LOOP\n     schema_name := schema_name(i);\n     IF i % commit_interval = 0 THEN\n         --RAISE NOTICE 'COMMIT CREATE step %', i;\n         COMMIT;\n     END IF;\n END LOOP;\n END$procedure$;\n\n\nCALL\n build_schema(1,11);\n\n\n<assert\n failure>\n\nCore file '/cores/core.1912' (x86_64) was loaded.\n\n (lldb) bt\n * thread #1, stop reason = signal SIGSTOP\n   * frame #0: 0x00007fff6c3ae33a\n libsystem_kernel.dylib`__pthread_kill + 10\n     frame #1: 0x00007fff6c46ae60\n libsystem_pthread.dylib`pthread_kill + 430\n     frame #2: 0x00007fff6c335808 libsystem_c.dylib`abort + 120\n     frame #3: 0x000000010af1af6d\n postgres`ExceptionalCondition(conditionName=\"ActiveSnapshotSet()\",\n errorType=\"FailedAssertion\", fileName=\"postgres.c\",\n lineNumber=867) at assert.c:67:2\n     frame #4: 0x000000010ad3aeb3\n postgres`pg_plan_query(querytree=0x00007ff663023848,\n query_string=\"\\nSELECT 'test_' || trim(to_char(i, '000000'))\\n\",\n cursorOptions=256, boundParams=0x0000000000000000) at\n postgres.c:867:2\n     frame #5: 0x000000010aad9059\n postgres`init_execution_state(queryTree_list=0x00007ff663024208,\n fcache=0x00007ff663022720, lazyEvalOK=true) at functions.c:513:12\n     frame #6: 0x000000010aad6dec\n postgres`init_sql_fcache(fcinfo=0x00007ff663035918, collation=0,\n lazyEvalOK=true) at functions.c:787:23\n     frame #7: 0x000000010aad5ffa\n postgres`fmgr_sql(fcinfo=0x00007ff663035918) at functions.c:1070:3\n     frame #8: 0x000000010aaaf660\n 
postgres`ExecInterpExpr(state=0x00007ff663035828,\n econtext=0x00007ff663035738, isnull=0x00007ffee5459bbf) at\n execExprInterp.c:680:8\n     frame #9: 0x000000010aaae9b7\n postgres`ExecInterpExprStillValid(state=0x00007ff663035828,\n econtext=0x00007ff663035738, isNull=0x00007ffee5459bbf) at\n execExprInterp.c:1807:9\n     frame #10: 0x00000001163e5b5a\n plpgsql.so`ExecEvalExpr(state=0x00007ff663035828,\n econtext=0x00007ff663035738, isNull=0x00007ffee5459bbf) at\n executor.h:303:9\n     frame #11: 0x00000001163e4fe0\n plpgsql.so`exec_eval_simple_expr(estate=0x00007ffee545a080,\n expr=0x00007ff662032db0, result=0x00007ffee5459b68,\n isNull=0x00007ffee5459bbf, rettype=0x00007ffee5459bb8,\n rettypmod=0x00007ffee5459bb4) at pl_exec.c:6328:12\n     frame #12: 0x00000001163e4887\n plpgsql.so`exec_eval_expr(estate=0x00007ffee545a080,\n expr=0x00007ff662032db0, isNull=0x00007ffee5459bbf,\n rettype=0x00007ffee5459bb8, rettypmod=0x00007ffee5459bb4) at\n pl_exec.c:5833:6\n     frame #13: 0x00000001163e30c2\n plpgsql.so`exec_assign_expr(estate=0x00007ffee545a080,\n target=0x00007ff66300e4d0, expr=0x00007ff662032db0) at\n pl_exec.c:4973:10\n     frame #14: 0x00000001163dc407\n plpgsql.so`exec_stmt_assign(estate=0x00007ffee545a080,\n stmt=0x00007ff662032e80) at pl_exec.c:2112:2\n     frame #15: 0x00000001163d9de2\n plpgsql.so`exec_stmt(estate=0x00007ffee545a080,\n stmt=0x00007ff662032e80) at pl_exec.c:1980:9\n     frame #16: 0x00000001163e3387\n plpgsql.so`exec_stmts(estate=0x00007ffee545a080,\n stmts=0x00007ff662032eb8) at pl_exec.c:1943:14\n     frame #17: 0x00000001163ddd7c\n plpgsql.so`exec_stmt_fori(estate=0x00007ffee545a080,\n stmt=0x00007ff662032ca8) at pl_exec.c:2783:8\n     frame #18: 0x00000001163d9ea2\n plpgsql.so`exec_stmt(estate=0x00007ffee545a080,\n stmt=0x00007ff662032ca8) at pl_exec.c:2012:9\n     frame #19: 0x00000001163e3387\n plpgsql.so`exec_stmts(estate=0x00007ffee545a080,\n stmts=0x00007ff6620328d0) at pl_exec.c:1943:14\n     frame #20: 
0x00000001163dc20d\n plpgsql.so`exec_stmt_block(estate=0x00007ffee545a080,\n block=0x00007ff6620231c8) at pl_exec.c:1884:8\n     frame #21: 0x00000001163d9dca\n plpgsql.so`exec_stmt(estate=0x00007ffee545a080,\n stmt=0x00007ff6620231c8) at pl_exec.c:1976:9\n     frame #22: 0x00000001163d8393\n plpgsql.so`plpgsql_exec_function(func=0x00007ff666055738,\n fcinfo=0x00007ffee545a478, simple_eval_estate=0x0000000000000000,\n simple_eval_resowner=0x0000000000000000, atomic=false) at\n pl_exec.c:610:7\n     frame #23: 0x00000001163f6c4a\n plpgsql.so`plpgsql_call_handler(fcinfo=0x00007ffee545a478) at\n pl_handler.c:265:13\n     frame #24: 0x000000010aa22232\n postgres`ExecuteCallStmt(stmt=0x00007ff662010320,\n params=0x0000000000000000, atomic=false, dest=0x00007ff662010ba0)\n at functioncmds.c:2232:11\n     frame #25: 0x000000010ad454af\n postgres`standard_ProcessUtility(pstmt=0x00007ff6620103e8,\n queryString=\"call build_schema (1,11);\",\n context=PROCESS_UTILITY_TOPLEVEL, params=0x0000000000000000,\n queryEnv=0x0000000000000000, dest=0x00007ff662010ba0,\n qc=0x00007ffee545b070) at utility.c:817:4\n     frame #26: 0x000000010ad44c82\n postgres`ProcessUtility(pstmt=0x00007ff6620103e8,\n queryString=\"call build_schema (1,11);\",\n context=PROCESS_UTILITY_TOPLEVEL, params=0x0000000000000000,\n queryEnv=0x0000000000000000, dest=0x00007ff662010ba0,\n qc=0x00007ffee545b070) at utility.c:524:3\n     frame #27: 0x000000010ad4443e\n postgres`PortalRunUtility(portal=0x00007ff66282f320,\n pstmt=0x00007ff6620103e8, isTopLevel=true, setHoldSnapshot=false,\n dest=0x00007ff662010ba0, qc=0x00007ffee545b070) at pquery.c:1145:2\n     frame #28: 0x000000010ad4359d\n postgres`PortalRunMulti(portal=0x00007ff66282f320,\n isTopLevel=true, setHoldSnapshot=false, dest=0x00007ff662010ba0,\n altdest=0x00007ff662010ba0, qc=0x00007ffee545b070) at\n pquery.c:1301:5\n     frame #29: 0x000000010ad42b2e\n postgres`PortalRun(portal=0x00007ff66282f320,\n count=9223372036854775807, isTopLevel=true, 
run_once=true,\n dest=0x00007ff662010ba0, altdest=0x00007ff662010ba0,\n qc=0x00007ffee545b070) at pquery.c:786:5\n     frame #30: 0x000000010ad3e0c5\n postgres`exec_simple_query(query_string=\"call build_schema\n (1,11);\") at postgres.c:1239:10\n     frame #31: 0x000000010ad3d26d postgres`PostgresMain(argc=1,\n argv=0x00007ff66280f670, dbname=\"nasbyj\", username=\"nasbyj\") at\n postgres.c:4339:7\n     frame #32: 0x000000010ac6752a\n postgres`BackendRun(port=0x00007ff664404080) at\n postmaster.c:4526:2\n     frame #33: 0x000000010ac66979\n postgres`BackendStartup(port=0x00007ff664404080) at\n postmaster.c:4210:3\n     frame #34: 0x000000010ac658fd postgres`ServerLoop at\n postmaster.c:1739:7\n     frame #35: 0x000000010ac633e4 postgres`PostmasterMain(argc=1,\n argv=0x00007ff661c06cc0) at postmaster.c:1412:11\n     frame #36: 0x000000010ab45679 postgres`main(argc=1,\n argv=0x00007ff661c06cc0) at main.c:210:3\n     frame #37: 0x00007fff6c266cc9 libdyld.dylib`start + 1\n     frame #38: 0x00007fff6c266cc9 libdyld.dylib`start + 1\n (lldb) fr 4\n invalid command 'frame 4'.\n (lldb) fr s 4\n frame #4: 0x000000010ad3aeb3\n postgres`pg_plan_query(querytree=0x00007ff663023848,\n query_string=\"\\nSELECT 'test_' || trim(to_char(i, '000000'))\\n\",\n cursorOptions=256, boundParams=0x0000000000000000) at\n postgres.c:867:2\n    864             return NULL;\n    865\n    866         /* Planner must have a snapshot in case it calls\n user-defined functions. */\n -> 867         Assert(ActiveSnapshotSet());\n    868\n    869         TRACE_POSTGRESQL_QUERY_PLAN_START();\n    870\n (lldb)\n\n\n... 
non-assert build ...\n\n\n(lldb) bt\n postgres was compiled with optimization - stepping may behave\n oddly; variables may not be available.\n * thread #1, stop reason = signal SIGSTOP\n   * frame #0: 0x0000000101d207db postgres`GetActiveSnapshot at\n snapmgr.c:845:25 [opt]\n     frame #1: 0x0000000101a83f25 postgres`fmgr_sql [inlined]\n postquel_start(es=0x00007fd19789c5a8, fcache=<unavailable>)\n at functions.c:832:9 [opt]\n     frame #2: 0x0000000101a83ec6\n postgres`fmgr_sql(fcinfo=<unavailable>) at functions.c:1161\n [opt]\n     frame #3: 0x0000000101a6fae1\n postgres`ExecInterpExpr(state=<unavailable>,\n econtext=<unavailable>, isnull=0x00007ffeee304a57) at\n execExprInterp.c:680:8 [opt]\n     frame #4: 0x00000001028ddc10 plpgsql.so`exec_eval_expr\n [inlined] ExecEvalExpr(state=<unavailable>,\n econtext=0x00007fd19a818128, isNull=0x00007ffeee304a57) at\n executor.h:303:9 [opt]\n     frame #5: 0x00000001028ddc06 plpgsql.so`exec_eval_expr\n [inlined] exec_eval_simple_expr(estate=0x00007ffeee304f18,\n expr=0x00007fd1980268c0, isNull=<unavailable>,\n rettype=<unavailable>, rettypmod=0x00007fd19a80fe00) at\n pl_exec.c:6328 [opt]\n     frame #6: 0x00000001028ddb2a\n plpgsql.so`exec_eval_expr(estate=0x00007ffeee304f18,\n expr=0x00007fd1980268c0, isNull=<unavailable>,\n rettype=<unavailable>, rettypmod=0x00007fd19a80fe00) at\n pl_exec.c:5833 [opt]\n     frame #7: 0x00000001028dd258\n plpgsql.so`exec_assign_expr(estate=0x00007ffeee304f18,\n target=0x00007fd19a810120, expr=0x00007fd1980268c0) at\n pl_exec.c:4973:10 [opt]\n     frame #8: 0x00000001028d86a2 plpgsql.so`exec_stmt [inlined]\n exec_stmt_assign(estate=0x00007ffeee304f18,\n stmt=0x00007fd198026980) at pl_exec.c:2112:2 [opt]\n     frame #9: 0x00000001028d868a\n plpgsql.so`exec_stmt(estate=0x00007ffeee304f18,\n stmt=0x00007fd198026980) at pl_exec.c:1980 [opt]\n     frame #10: 0x00000001028d9941 plpgsql.so`exec_stmt at\n pl_exec.c:1943:14 [opt]\n     frame #11: 0x00000001028d9914 plpgsql.so`exec_stmt [inlined]\n 
exec_stmt_fori(estate=0x00007ffeee304f18, stmt=0x00007fd1980267d8)\n at pl_exec.c:2783 [opt]\n     frame #12: 0x00000001028d98a9\n plpgsql.so`exec_stmt(estate=<unavailable>,\n stmt=<unavailable>) at pl_exec.c:2012 [opt]\n     frame #13: 0x00000001028dbf20 plpgsql.so`exec_stmt_block at\n pl_exec.c:1943:14 [opt]\n     frame #14: 0x00000001028dbefb\n plpgsql.so`exec_stmt_block(estate=0x00007ffeee304f18,\n block=0x00007fd198026c20) at pl_exec.c:1884 [opt]\n     frame #15: 0x00000001028d7ec3\n plpgsql.so`exec_stmt(estate=0x00007ffeee304f18,\n stmt=0x00007fd198026c20) at pl_exec.c:1976:9 [opt]\n     frame #16: 0x00000001028d6f45\n plpgsql.so`plpgsql_exec_function(func=0x00007fd19785cc60,\n fcinfo=0x00007ffeee3051f0, simple_eval_estate=<unavailable>,\n simple_eval_resowner=<unavailable>,\n atomic=<unavailable>) at pl_exec.c:610:7 [opt]\n     frame #17: 0x00000001028e9122\n plpgsql.so`plpgsql_call_handler(fcinfo=0x00007ffeee3051f0) at\n pl_handler.c:265:13 [opt]\n     frame #18: 0x0000000101a2b5e4\n postgres`ExecuteCallStmt(stmt=0x00007fd1978102b0,\n params=0x00007fd19a80f4e8, atomic=<unavailable>,\n dest=0x00007fd1978108d0) at functioncmds.c:2232:11 [opt]\n     frame #19: 0x0000000101bd31f5\n postgres`standard_ProcessUtility(pstmt=0x00007fd197810360,\n queryString=\"CALL build_schema(1,11);\",\n context=PROCESS_UTILITY_TOPLEVEL, params=0x0000000000000000,\n queryEnv=0x0000000000000000, dest=0x00007fd1978108d0,\n qc=0x00007ffeee305b80) at utility.c:817:4 [opt]\n     frame #20: 0x0000000101bd2c58\n postgres`ProcessUtility(pstmt=0x00007fd197810360,\n queryString=<unavailable>, context=<unavailable>,\n params=<unavailable>, queryEnv=<unavailable>,\n dest=<unavailable>, qc=0x00007ffeee305b80) at\n utility.c:524:3 [opt]\n     frame #21: 0x0000000101bd29b1\n postgres`PortalRunUtility(portal=0x00007fd197838118,\n pstmt=0x00007fd197810360, isTopLevel=<unavailable>,\n setHoldSnapshot=<unavailable>, dest=<unavailable>,\n qc=0x00007ffeee305b80) at pquery.c:1145:2 [opt]\n     frame 
#22: 0x0000000101bd2028\n postgres`PortalRunMulti(portal=0x00007fd197838118,\n isTopLevel=true, setHoldSnapshot=false, dest=0x00007fd1978108d0,\n altdest=0x00007fd1978108d0, qc=<unavailable>) at pquery.c:0\n [opt]\n     frame #23: 0x0000000101bd1afd\n postgres`PortalRun(portal=0x00007fd197838118,\n count=9223372036854775807, isTopLevel=true,\n run_once=<unavailable>, dest=0x00007fd1978108d0,\n altdest=0x00007fd1978108d0, qc=0x00007ffeee305b80) at\n pquery.c:786:5 [opt]\n     frame #24: 0x0000000101bd0d5a\n postgres`exec_simple_query(query_string=\"CALL\n build_schema(1,11);\") at postgres.c:1239:10 [opt]\n     frame #25: 0x0000000101bce608\n postgres`PostgresMain(argc=<unavailable>,\n argv=<unavailable>, dbname=<unavailable>,\n username=<unavailable>) at postgres.c:0 [opt]\n     frame #26: 0x0000000101b57e11\n postgres`BackendRun(port=<unavailable>) at\n postmaster.c:4526:2 [opt]\n     frame #27: 0x0000000101b57590 postgres`ServerLoop [inlined]\n BackendStartup(port=<unavailable>) at postmaster.c:4210:3\n [opt]\n     frame #28: 0x0000000101b5756f postgres`ServerLoop at\n postmaster.c:1739 [opt]\n     frame #29: 0x0000000101b5474a\n postgres`PostmasterMain(argc=<unavailable>,\n argv=0x00007fd197406cc0) at postmaster.c:1412:11 [opt]\n     frame #30: 0x0000000101abf3ef\n postgres`main(argc=<unavailable>, argv=<unavailable>)\n at main.c:210:3 [opt]\n     frame #31: 0x00007fff6c266cc9 libdyld.dylib`start + 1\n     frame #32: 0x00007fff6c266cc9 libdyld.dylib`start + 1\n (lldb)", "msg_date": "Mon, 21 Jun 2021 16:19:27 -0700", "msg_from": "Jim Nasby <nasbyj@amazon.com>", "msg_from_op": true, "msg_subject": "Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "On Mon, Jun 21, 2021 at 04:19:27PM -0700, Jim Nasby wrote:\n> The following generates an assertion failure. 
Quick testing with start and\n> stop as well as the core dump shows it’s failing on the execution of\n> `schema_name := schema_name(i)` immediately after COMMIT, because there’s no\n> active snapshot. On a build without asserts I get a failure in\n> GetActiveSnapshot() (second stack trace). This works fine on 12_STABLE, but\n> fails on 13_STABLE and HEAD.\n\nA bisect run points me to the following commit:\ncommit 73b06cf893c9d3bb38c11878a12cc29407e78b6c\nAuthor: Tom Lane <tgl@sss.pgh.pa.us>\nDate: Fri Nov 22 15:02:18 2019 -0500\n\nAvoid taking a new snapshot for an immutable simple expression in plpgsql.\n\nSnapshots would be taken when using non-immutable functions. I'd need\nto study more this code to grab if we could improve the situation\nafter committing the transaction, but, Tom, shouldn't we enforce a\nsnapshot in the case where the expression has not been prepared for\nexecution in the new XACT, even for the immutable case? It seems to \nme that this refers to the case where expr_simple_lxid is still\ninvalid, no?\n--\nMichael", "msg_date": "Tue, 22 Jun 2021 15:34:20 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a\n stored proc" }, { "msg_contents": "Michael Paquier <michael@paquier.xyz> writes:\n> On Mon, Jun 21, 2021 at 04:19:27PM -0700, Jim Nasby wrote:\n>> The following generates an assertion failure.\n\n> A bisect run points me to the following commit:\n> commit 73b06cf893c9d3bb38c11878a12cc29407e78b6c\n> Author: Tom Lane <tgl@sss.pgh.pa.us>\n> Date: Fri Nov 22 15:02:18 2019 -0500\n> Avoid taking a new snapshot for an immutable simple expression in plpgsql.\n\nHmm. I think the real issue here is that commit 84f5c2908 did\nnot cover the \"simple expression\" code path in plpgsql. 
We\nneed to re-establish an outer snapshot when the next thing\nthat happens after COMMIT is a simple expression, too.\n\nIn this view, 73b06cf8 just removed code that was masking the\nlack of a snapshot during the evaluation of the simple expr\nitself. However, we'd still have had a problem if the simple\nexpr returned a toast pointer that we had to dereference after\nreturning (and popping that snapshot). So I'm thinking\nback-patch to v11, as before.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 22 Jun 2021 10:58:42 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "I wrote:\n> Hmm. I think the real issue here is that commit 84f5c2908 did\n> not cover the \"simple expression\" code path in plpgsql. We\n> need to re-establish an outer snapshot when the next thing\n> that happens after COMMIT is a simple expression, too.\n\nThe attached seems to be enough to resolve Jim's example. I'd like\nto invent a test case that involves a detoast of the simple\nexpression's result, too, to show that transiently pushing a\nsnapshot for the duration of the expression is not the right fix.\n\n\t\t\tregards, tom lane", "msg_date": "Tue, 22 Jun 2021 11:48:46 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "I wrote:\n> The attached seems to be enough to resolve Jim's example. I'd like\n> to invent a test case that involves a detoast of the simple\n> expression's result, too, to show that transiently pushing a\n> snapshot for the duration of the expression is not the right fix.\n\nHere we go. This test case gives \"cannot fetch toast data without an\nactive snapshot\" in v11 and v12 branch tips. 
Since those branches lack\nthe 73b06cf89 optimization, they push a snapshot while calling the\nSQL-language function, thus it doesn't complain. But what comes back\nis toasted, and then we fail trying to detoast it.\n\n\t\t\tregards, tom lane", "msg_date": "Tue, 22 Jun 2021 13:13:08 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "On Tue, Jun 22, 2021 at 01:13:08PM -0400, Tom Lane wrote:\n> I wrote:\n> > The attached seems to be enough to resolve Jim's example. I'd like\n> > to invent a test case that involves a detoast of the simple\n> > expression's result, too, to show that transiently pushing a\n> > snapshot for the duration of the expression is not the right fix.\n> \n> Here we go. This test case gives \"cannot fetch toast data without an\n> active snapshot\" in v11 and v12 branch tips. Since those branches lack\n> the 73b06cf89 optimization, they push a snapshot while calling the\n> SQL-language function, thus it doesn't complain. But what comes back\n> is toasted, and then we fail trying to detoast it.\n\nThis causes the server to crash during FETCH.\n\nts=# begin; declare b cursor for VALUES(1); fetch 100 in b;\nBEGIN\nDECLARE CURSOR\nserver closed the connection unexpectedly\n This probably means the server terminated abnormally\n before or while processing the request.\nThe connection to the server was lost. 
Attempting reset: Failed.\n\n7c337b6b527b7052e6a751f966d5734c56f668b5 is the first bad commit\n|\tcommit 7c337b6b527b7052e6a751f966d5734c56f668b5\n|\tAuthor: Tom Lane <tgl@sss.pgh.pa.us>\n|\tDate: Fri Jun 18 11:22:58 2021 -0400\n|\n|\t Centralize the logic for protective copying of utility statements.\n\nI noticed because it was tickled by pg_dump.\n\n[109037.576659] postgres[32358]: segfault at 9a ip 00007f86a68fa7b1 sp 00007fffd5ae2a88 error 4 in libc-2.17.so[7f86a678b000+1c4000]\n\n< 2021-06-22 20:00:06.557 EDT >LOG: server process (PID 32358) was terminated by signal 11: Segmentation fault\n< 2021-06-22 20:00:06.557 EDT >DETAIL: Failed process was running: FETCH 1000 IN bloboid\n\nCore was generated by `postgres: postgres ts [local] FETCH '.\n\n(gdb) bt\n#0 0x00007f86a68fa7b1 in __strlen_sse2_pminub () from /lib64/libc.so.6\n#1 0x00000000008f7151 in string_hash (key=0x9a, keysize=64) at hashfn.c:667\n#2 0x00000000008c1dd0 in hash_search (hashp=0x1534168, keyPtr=0x9a, action=action@entry=HASH_REMOVE, foundPtr=foundPtr@entry=0x0) at dynahash.c:959\n#3 0x00000000008df29b in PortalDrop (portal=0x1532158, isTopCommit=<optimized out>) at portalmem.c:514\n#4 0x00000000007959a7 in exec_simple_query (query_string=0x14bce88 \"FETCH 1000 IN bloboid\") at postgres.c:1224\n#5 0x0000000000796e2d in PostgresMain (argc=argc@entry=1, argv=argv@entry=0x7fffd5ae2ff0, dbname=0x14b9a78 \"ts\", username=<optimized out>) at postgres.c:4486\n#6 0x00000000004890c1 in BackendRun (port=<optimized out>, port=<optimized out>) at postmaster.c:4507\n#7 BackendStartup (port=0x14e4280) at postmaster.c:4229\n#8 ServerLoop () at postmaster.c:1745\n#9 0x0000000000718dcd in PostmasterMain (argc=argc@entry=3, argv=argv@entry=0x14b79e0) at postmaster.c:1417\n#10 0x0000000000489f32 in main (argc=3, argv=0x14b79e0) at main.c:209\n\n(gdb) p *portal\n$1 = {name = 0x9a <Address 0x9a out of bounds>, prepStmtName = 0x0, portalContext = 0x15f5b00, resowner = 0x14f7d50, cleanup = 0x0, createSubid = 1, 
activeSubid = 1,\n sourceText = 0x14bce88 \"FETCH 1000 IN bloboid\", commandTag = CMDTAG_FETCH, qc = {commandTag = CMDTAG_FETCH, nprocessed = 0}, stmts = 0x14bdc58, cplan = 0x0,\n portalParams = 0x0, queryEnv = 0x0, strategy = PORTAL_UTIL_SELECT, cursorOptions = 4, run_once = true, status = PORTAL_READY, portalPinned = false, autoHeld = false,\n queryDesc = 0x0, tupDesc = 0x15f5c18, formats = 0x15f5d28, portalSnapshot = 0x0, holdStore = 0x165d688, holdContext = 0x165d570, holdSnapshot = 0x0, atStart = true,\n atEnd = true, portalPos = 0, creation_time = 677721606449684, visible = false}\n\n\n", "msg_date": "Tue, 22 Jun 2021 22:59:16 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a\n stored proc" }, { "msg_contents": "Justin Pryzby <pryzby@telsasoft.com> writes:\n> This causes the server to crash during FETCH.\n\n> ts=# begin; declare b cursor for VALUES(1); fetch 100 in b;\n> BEGIN\n> DECLARE CURSOR\n> server closed the connection unexpectedly\n> This probably means the server terminated abnormally\n> before or while processing the request.\n> The connection to the server was lost. 
Attempting reset: Failed.\n\nHm, works for me:\n\nregression=# begin; declare b cursor for VALUES(1); fetch 100 in b;\nBEGIN\nDECLARE CURSOR\n column1 \n---------\n 1\n(1 row)\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 23 Jun 2021 00:07:11 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "On Wed, Jun 23, 2021 at 12:07:11AM -0400, Tom Lane wrote:\n> Justin Pryzby <pryzby@telsasoft.com> writes:\n> > This causes the server to crash during FETCH.\n> \n> > ts=# begin; declare b cursor for VALUES(1); fetch 100 in b;\n> > BEGIN\n> > DECLARE CURSOR\n> > server closed the connection unexpectedly\n> > This probably means the server terminated abnormally\n> > before or while processing the request.\n> > The connection to the server was lost. Attempting reset: Failed.\n> \n> Hm, works for me:\n\nI think it's because I had old pg_stat_statements module, and hadn't make -C contrib.\n\nSorry for the noise.\n\n-- \nJustin\n\n\n", "msg_date": "Tue, 22 Jun 2021 23:25:18 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a\n stored proc" } ]
[ { "msg_contents": "Hi,\n\nWhile scanning for assertion failures on the build farm that don't\nappear to have been discussed, I found this[1] in\n010_truncate_publisher.log on the 13 branch:\n\nTRAP: FailedAssertion(\"tupdesc->tdrefcount <= 0\", File:\n\"/home/bf/build/buildfarm-desmoxytes/REL_13_STABLE/pgsql.build/../pgsql/src/backend/access/common/tupdesc.c\",\nLine: 321)\n2021-06-17 02:17:04.392 CEST [60ca947c.f7a43:4] LOG: server process\n(PID 1014658) was terminated by signal 11: Segmentation fault\n2021-06-17 02:17:04.392 CEST [60ca947c.f7a43:5] DETAIL: Failed\nprocess was running: SELECT pg_catalog.set_config('search_path', '',\nfalse);\n\nThe last thing the segfaulting process said was:\n\n2021-06-17 02:17:03.847 CEST [60ca947f.f7b82:8] LOG: logical decoding\nfound consistent point at 0/157D538\n2021-06-17 02:17:03.847 CEST [60ca947f.f7b82:9] DETAIL: There are no\nrunning transactions.\n\nUnfortunately 13 doesn't log PIDs for assertion failures (hmm, commit\n18c170a08ee could be back-patched?) 
so it's not clear which process\nthat was, and there's no backtrace.\n\nI don't know if \"pg_catalog.set_config('search_path', '', false)\" is a\nclue that this is related to another recent crash[2] I mentioned, also\nfrom the 13 branch.\n\n[1] https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=desmoxytes&dt=2021-06-16%2023:58:47\n[2] https://www.postgresql.org/message-id/flat/CA%2BhUKG%2BqdF6QE6rcj_Zj5h2qVARM--%2B92sqdmr-0DUSM_0Qu_g%40mail.gmail.com\n\n\n", "msg_date": "Tue, 22 Jun 2021 17:02:46 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": true, "msg_subject": "subscription/t/010_truncate.pl failure on desmoxytes in REL_13_STABLE" }, { "msg_contents": "On Tue, Jun 22, 2021 at 10:33 AM Thomas Munro <thomas.munro@gmail.com> wrote:\n>\n> While scanning for assertion failures on the build farm that don't\n> appear to have been discussed, I found this[1] in\n> 010_truncate_publisher.log on the 13 branch:\n>\n> TRAP: FailedAssertion(\"tupdesc->tdrefcount <= 0\", File:\n> \"/home/bf/build/buildfarm-desmoxytes/REL_13_STABLE/pgsql.build/../pgsql/src/backend/access/common/tupdesc.c\",\n> Line: 321)\n>\n\nI guess this could be similar to what we see at:\nhttps://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=skink&dt=2021-06-15%2020%3A49%3A26\n\nWe have discussed this in another thread at:\nhttps://www.postgresql.org/message-id/648020.1623854904%40sss.pgh.pa.us\n\nThe reason why I think it is the same is that assertion failure shown\nin the report is from function FreeTupleDesc() which we can call from\npgoutput.c while processing the invalidation. Ideally, we shouldn't\ncall invalidation before initializing the tuple conversion map for\npartitions but in some rare cases, that was happening which we have\nfixed in commit\n(https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=357cb8f07f95665ea533ff534821c22c35b01288).\n\nI see this report is from 16th June 2021 and the commit is on 18th\nJune 2021. 
So, I am hoping this should have been fixed but if we see\nit again then probably we need to investigate it further.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 22 Jun 2021 15:31:45 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: subscription/t/010_truncate.pl failure on desmoxytes in\n REL_13_STABLE" }, { "msg_contents": "On Tue, Jun 22, 2021 at 10:01 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> On Tue, Jun 22, 2021 at 10:33 AM Thomas Munro <thomas.munro@gmail.com> wrote:\n> > While scanning for assertion failures on the build farm that don't\n> > appear to have been discussed, I found this[1] in\n> > 010_truncate_publisher.log on the 13 branch:\n> >\n> > TRAP: FailedAssertion(\"tupdesc->tdrefcount <= 0\", File:\n> > \"/home/bf/build/buildfarm-desmoxytes/REL_13_STABLE/pgsql.build/../pgsql/src/backend/access/common/tupdesc.c\",\n> > Line: 321)\n>\n> I guess this could be similar to what we see at:\n> https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=skink&dt=2021-06-15%2020%3A49%3A26\n>\n> We have discussed this in another thread at:\n> https://www.postgresql.org/message-id/648020.1623854904%40sss.pgh.pa.us\n>\n> The reason why I think it is the same is that assertion failure shown\n> in the report is from function FreeTupleDesc() which we can call from\n> pgoutput.c while processing the invalidation. Ideally, we shouldn't\n> call invalidation before initializing the tuple conversion map for\n> partitions but in some rare cases, that was happening which we have\n> fixed in commit\n> (https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=357cb8f07f95665ea533ff534821c22c35b01288).\n>\n> I see this report is from 16th June 2021 and the commit is on 18th\n> June 2021. So, I am hoping this should have been fixed but if we see\n> it again then probably we need to investigate it further.\n\nAhh, that makes sense. 
Thanks for checking, and sorry for the noise.\n\n\n", "msg_date": "Tue, 22 Jun 2021 22:28:19 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": true, "msg_subject": "Re: subscription/t/010_truncate.pl failure on desmoxytes in\n REL_13_STABLE" }, { "msg_contents": "Thomas Munro <thomas.munro@gmail.com> writes:\n> On Tue, Jun 22, 2021 at 10:01 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>> I see this report is from 16th June 2021 and the commit is on 18th\n>> June 2021. So, I am hoping this should have been fixed but if we see\n>> it again then probably we need to investigate it further.\n\n> Ahh, that makes sense. Thanks for checking, and sorry for the noise.\n\nBTW, the reason that the walsender is still showing its \"query\" as\n\"SELECT pg_config...\" is that pre-v14 versions don't update the\nreported query for replication commands, only plain-SQL commands.\nI recall that we fixed that in HEAD awhile ago; should we consider\nback-patching something for it? It seems quite confusing.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 22 Jun 2021 09:24:47 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: subscription/t/010_truncate.pl failure on desmoxytes in\n REL_13_STABLE" }, { "msg_contents": "On Tue, Jun 22, 2021 at 6:54 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> Thomas Munro <thomas.munro@gmail.com> writes:\n> > On Tue, Jun 22, 2021 at 10:01 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >> I see this report is from 16th June 2021 and the commit is on 18th\n> >> June 2021. So, I am hoping this should have been fixed but if we see\n> >> it again then probably we need to investigate it further.\n>\n> > Ahh, that makes sense. 
Thanks for checking, and sorry for the noise.\n>\n> BTW, the reason that the walsender is still showing its \"query\" as\n> \"SELECT pg_config...\" is that pre-v14 versions don't update the\n> reported query for replication commands, only plain-SQL commands.\n> I recall that we fixed that in HEAD awhile ago; should we consider\n> back-patching something for it?\n>\n\nI think it would be great if we can do that. Analyzing such failures\nand in general for replication errors that will be a nice improvement\nand make the jobs of many people a bit easier.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 23 Jun 2021 08:07:43 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: subscription/t/010_truncate.pl failure on desmoxytes in\n REL_13_STABLE" }, { "msg_contents": "Amit Kapila <amit.kapila16@gmail.com> writes:\n> On Tue, Jun 22, 2021 at 6:54 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> BTW, the reason that the walsender is still showing its \"query\" as\n>> \"SELECT pg_config...\" is that pre-v14 versions don't update the\n>> reported query for replication commands, only plain-SQL commands.\n>> I recall that we fixed that in HEAD awhile ago; should we consider\n>> back-patching something for it?\n\n> I think it would be great if we can do that. Analyzing such failures\n> and in general for replication errors that will be a nice improvement\n> and make the jobs of many people a bit easier.\n\nChecking the git history, this was fixed in f560209c6, which also\nincluded some other mostly-cosmetic cleanup. I'm inclined to\npropose back-patching that whole commit, rather than allowing the\ncode in exec_replication_command() to diverge in different branches.\nIt looks like it applies cleanly as far back as v10. 
Maybe something\ncould be done for 9.6 as well, but since that branch is so close to\nEOL, I doubt it's worth spending extra effort on it.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 24 Jun 2021 13:54:59 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: subscription/t/010_truncate.pl failure on desmoxytes in\n REL_13_STABLE" }, { "msg_contents": "On Thu, Jun 24, 2021 at 11:25 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> Amit Kapila <amit.kapila16@gmail.com> writes:\n> > On Tue, Jun 22, 2021 at 6:54 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> BTW, the reason that the walsender is still showing its \"query\" as\n> >> \"SELECT pg_config...\" is that pre-v14 versions don't update the\n> >> reported query for replication commands, only plain-SQL commands.\n> >> I recall that we fixed that in HEAD awhile ago; should we consider\n> >> back-patching something for it?\n>\n> > I think it would be great if we can do that. Analyzing such failures\n> > and in general for replication errors that will be a nice improvement\n> > and make the jobs of many people a bit easier.\n>\n> Checking the git history, this was fixed in f560209c6, which also\n> included some other mostly-cosmetic cleanup. I'm inclined to\n> propose back-patching that whole commit, rather than allowing the\n> code in exec_replication_command() to diverge in different branches.\n> It looks like it applies cleanly as far back as v10. 
Maybe something\n> could be done for 9.6 as well, but since that branch is so close to\n> EOL, I doubt it's worth spending extra effort on it.\n>\n\n+1.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 25 Jun 2021 09:23:44 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: subscription/t/010_truncate.pl failure on desmoxytes in\n REL_13_STABLE" }, { "msg_contents": "Amit Kapila <amit.kapila16@gmail.com> writes:\n> On Thu, Jun 24, 2021 at 11:25 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> Checking the git history, this was fixed in f560209c6, which also\n>> included some other mostly-cosmetic cleanup. I'm inclined to\n>> propose back-patching that whole commit, rather than allowing the\n>> code in exec_replication_command() to diverge in different branches.\n>> It looks like it applies cleanly as far back as v10. Maybe something\n>> could be done for 9.6 as well, but since that branch is so close to\n>> EOL, I doubt it's worth spending extra effort on it.\n\n> +1.\n\nDone that way.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 25 Jun 2021 10:46:57 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: subscription/t/010_truncate.pl failure on desmoxytes in\n REL_13_STABLE" }, { "msg_contents": "On Fri, Jun 25, 2021 at 8:16 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> Amit Kapila <amit.kapila16@gmail.com> writes:\n> > On Thu, Jun 24, 2021 at 11:25 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> Checking the git history, this was fixed in f560209c6, which also\n> >> included some other mostly-cosmetic cleanup. I'm inclined to\n> >> propose back-patching that whole commit, rather than allowing the\n> >> code in exec_replication_command() to diverge in different branches.\n> >> It looks like it applies cleanly as far back as v10. 
Maybe something\n> >> could be done for 9.6 as well, but since that branch is so close to\n> >> EOL, I doubt it's worth spending extra effort on it.\n>\n> > +1.\n>\n> Done that way.\n>\n\nThanks!\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Sat, 26 Jun 2021 15:43:52 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: subscription/t/010_truncate.pl failure on desmoxytes in\n REL_13_STABLE" } ]
[ { "msg_contents": "I have accumulated a few patches to improve the output of the scripts in \nsrc/backend/utils/mb/Unicode/ to be less non-standard-looking and fix a \nfew other minor things in that area.\n\nv1-0001-Make-Unicode-makefile-more-parallel-safe.patch\n\nThe makefile rule that calls UCS_to_most.pl was written incorrectly for \nparallel make. The script writes all output files in one go, but the \nrule as written would call the command once for each output file in \nparallel.\n\nv1-0002-Make-UCS_to_most.pl-process-encodings-in-sorted-o.patch\n\nThis mainly just helps eyeball the output while debugging the previous \npatch.\n\nv1-0003-Remove-some-whitespace-in-generated-C-output.patch\n\nImprove a small formatting issue in the output.\n\nv1-0004-Simplify-code-generation-code.patch\n\nThis simplifies the code a bit, which helps with the next patch.\n\nv1-0005-Fix-indentation-in-generated-output.patch\n\nThis changes the indentation in the output from two spaces to a tab.\n\nI haven't included the actual output changes in the last patch, because \nthey would be huge, but the idea should be clear.\n\nAll together, these make the output look closer to how pgindent would \nmake it.", "msg_date": "Tue, 22 Jun 2021 09:20:16 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": true, "msg_subject": "improvements in Unicode tables generation code" }, { "msg_contents": "At Tue, 22 Jun 2021 09:20:16 +0200, Peter Eisentraut <peter.eisentraut@enterprisedb.com> wrote in \n> I have accumulated a few patches to improve the output of the scripts\n> in src/backend/utils/mb/Unicode/ to be less non-standard-looking and\n> fix a few other minor things in that area.\n> \n> v1-0001-Make-Unicode-makefile-more-parallel-safe.patch\n> \n> The makefile rule that calls UCS_to_most.pl was written incorrectly\n> for parallel make. 
The script writes all output files in one go, but\n> the rule as written would call the command once for each output file\n> in parallel.\n\nI was annoyed by that behavior but haven't found how to stop that. It\nlooks to work. (But I haven't run it for me for the reason at the end\nof this mail.)\n\n> v1-0002-Make-UCS_to_most.pl-process-encodings-in-sorted-o.patch\n> \n> This mainly just helps eyeball the output while debugging the previous\n> patch.\n> \n> v1-0003-Remove-some-whitespace-in-generated-C-output.patch\n> \n> Improve a small formatting issue in the output.\n\nThese look just fine.\n\n> v1-0004-Simplify-code-generation-code.patch\n> \n> This simplifies the code a bit, which helps with the next patch.\n\nThis simplifies the code in exchange of allowing a comma after the\nlast element of array literals. I'm fine with it as long as we allow\nthat style in the tree.\n\n> v1-0005-Fix-indentation-in-generated-output.patch\n> \n> This changes the indentation in the output from two spaces to a tab.\n> \n> I haven't included the actual output changes in the last patch,\n> because they would be huge, but the idea should be clear.\n> \n> All together, these make the output look closer to how pgindent would\n> make it.\n\nI agree to the fix.\n\nMmm. (although, somewhat unrelated to this patch set) I tried this but\nI found that www.unicode.org doesn't respond (for at least these\nseveral days). I'm not sure what is happening here.\n\n> wget -O 8859-2.TXT --no-use-server-timestamps https://www.unicode.org/Public/MAPPINGS/ISO8859/8859-2.TXT\n> --2021-06-22 17:09:34-- https://www.unicode.org/Public/MAPPINGS/ISO8859/8859-2.TXT\n> Resolving www.unicode.org (www.unicode.org)... 66.34.208.12\n> Connecting to www.unicode.org (www.unicode.org)|66.34.208.12|:443... 
\n(timeouts)\n\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Tue, 22 Jun 2021 17:17:26 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: improvements in Unicode tables generation code" }, { "msg_contents": "On 22/06/2021 10:20, Peter Eisentraut wrote:\n> I have accumulated a few patches to improve the output of the scripts in\n> src/backend/utils/mb/Unicode/ to be less non-standard-looking and fix a\n> few other minor things in that area.\n> \n> v1-0001-Make-Unicode-makefile-more-parallel-safe.patch\n> \n> The makefile rule that calls UCS_to_most.pl was written incorrectly for\n> parallel make. The script writes all output files in one go, but the\n> rule as written would call the command once for each output file in\n> parallel.\n\nThis could use a comment. At a quick glance, I don't understand what all \nthe $(wordlist ...) magic does.\n\nPerhaps we should change the script or Makefile so that it doesn't \ncreate all the maps in one go?\n\n> v1-0002-Make-UCS_to_most.pl-process-encodings-in-sorted-o.patch\n> \n> This mainly just helps eyeball the output while debugging the previous\n> patch.\n\n+1\n\n> v1-0003-Remove-some-whitespace-in-generated-C-output.patch\n> \n> Improve a small formatting issue in the output.\n\nI'm surprised the added \\n in the perl code didn't result in extra \nnewlines in the outputs.\n\n> v1-0004-Simplify-code-generation-code.patch\n> \n> This simplifies the code a bit, which helps with the next patch.\n\nIf we do that, let's add the trailing commas to the other arrays too, \nnot just the combined maps.\n\nNo objection, but how does this help the next patch?\n\nIf we want to avoid the stray commas (and I think they are a little \nugly, but that's a matter of taste), we could adopt the approach that \nprint_radix_table() uses to avoid the comma. 
That seems simpler than \nwhat print_from_utf8_combined_map and print_to_utf8_combined_map are doing.\n\n> v1-0005-Fix-indentation-in-generated-output.patch\n> \n> This changes the indentation in the output from two spaces to a tab.\n> \n> I haven't included the actual output changes in the last patch, because\n> they would be huge, but the idea should be clear.\n> \n> All together, these make the output look closer to how pgindent would\n> make it.\n\nThanks!\n\n- Heikki\n\n\n", "msg_date": "Tue, 22 Jun 2021 11:20:46 +0300", "msg_from": "Heikki Linnakangas <hlinnaka@iki.fi>", "msg_from_op": false, "msg_subject": "Re: improvements in Unicode tables generation code" }, { "msg_contents": "At Tue, 22 Jun 2021 11:20:46 +0300, Heikki Linnakangas <hlinnaka@iki.fi> wrote in \n> On 22/06/2021 10:20, Peter Eisentraut wrote:\n> > v1-0004-Simplify-code-generation-code.patch\n> > This simplifies the code a bit, which helps with the next patch.\n> \n> If we do that, let's add the trailing commas to the other arrays too,\n> not just the combined maps.\n> \n> No objection, but how does this help the next patch?\n> \n> If we want to avoid the stray commas (and I think they are a little\n> ugly, but that's a matter of taste), we could adopt the approach that\n> print_radix_table() uses to avoid the comma. 
That seems simpler than\n> what print_from_utf8_combined_map and print_to_utf8_combined_map are\n> doing.\n\n+1 for adopting the same method with print_radix_table *if* we do want\nto avoid the stray commans.\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Tue, 22 Jun 2021 17:33:07 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: improvements in Unicode tables generation code" }, { "msg_contents": "On 22.06.21 10:20, Heikki Linnakangas wrote:\n> On 22/06/2021 10:20, Peter Eisentraut wrote:\n>> I have accumulated a few patches to improve the output of the scripts in\n>> src/backend/utils/mb/Unicode/ to be less non-standard-looking and fix a\n>> few other minor things in that area.\n>>\n>> v1-0001-Make-Unicode-makefile-more-parallel-safe.patch\n>>\n>> The makefile rule that calls UCS_to_most.pl was written incorrectly for\n>> parallel make.  The script writes all output files in one go, but the\n>> rule as written would call the command once for each output file in\n>> parallel.\n> \n> This could use a comment. At a quick glance, I don't understand what all \n> the $(wordlist ...) magic does.\n> \n> Perhaps we should change the script or Makefile so that it doesn't \n> create all the maps in one go?\n\nI agree, either comment it better or just write one file at a time. \nI'll take another look at that.\n\n>> v1-0003-Remove-some-whitespace-in-generated-C-output.patch\n>>\n>> Improve a small formatting issue in the output.\n> \n> I'm surprised the added \\n in the perl code didn't result in extra \n> newlines in the outputs.\n\nTrue, I'll have to check that again. 
I suspect that \\n actually belongs \nto patch 0004.\n\n>> v1-0004-Simplify-code-generation-code.patch\n>>\n>> This simplifies the code a bit, which helps with the next patch.\n> \n> If we do that, let's add the trailing commas to the other arrays too, \n> not just the combined maps.\n> \n> No objection, but how does this help the next patch?\n\nMainly it just moves things around so that each print normally starts at \nthe beginning of a line and concludes with a \\n.\n\n\n", "msg_date": "Wed, 23 Jun 2021 10:55:41 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Re: improvements in Unicode tables generation code" }, { "msg_contents": "On 23.06.21 10:55, Peter Eisentraut wrote:\n>>> v1-0001-Make-Unicode-makefile-more-parallel-safe.patch\n>>>\n>>> The makefile rule that calls UCS_to_most.pl was written incorrectly for\n>>> parallel make.  The script writes all output files in one go, but the\n>>> rule as written would call the command once for each output file in\n>>> parallel.\n>>\n>> This could use a comment. At a quick glance, I don't understand what \n>> all the $(wordlist ...) magic does.\n>>\n>> Perhaps we should change the script or Makefile so that it doesn't \n>> create all the maps in one go?\n> \n> I agree, either comment it better or just write one file at a time. I'll \n> take another look at that.\n\nHere is a patch that does it one file (pair) at a time. The other rules \nbesides UCS_to_most.pl actually had the same problem, since they produce \ntwo output files, so running in parallel called each script twice. In \nthis patch, all of that is heavily refactored and works correctly now. 
\nNote that UCS_to_most.pl already accepted a command-line argument to \nspecify which encoding to work with.", "msg_date": "Tue, 20 Jul 2021 13:57:16 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Re: improvements in Unicode tables generation code" }, { "msg_contents": "On 20.07.21 13:57, Peter Eisentraut wrote:\n>>> Perhaps we should change the script or Makefile so that it doesn't \n>>> create all the maps in one go?\n>>\n>> I agree, either comment it better or just write one file at a time. \n>> I'll take another look at that.\n> \n> Here is a patch that does it one file (pair) at a time.  The other rules \n> besides UCS_to_most.pl actually had the same problem, since they produce \n> two output files, so running in parallel called each script twice.  In \n> this patch, all of that is heavily refactored and works correctly now. \n> Note that UCS_to_most.pl already accepted a command-line argument to \n> specify which encoding to work with.\n\nHere is an updated patch with a thinko fix that made the previous patch \nnot actually work.", "msg_date": "Tue, 28 Sep 2021 10:25:20 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Re: improvements in Unicode tables generation code" }, { "msg_contents": "\nOn 28.09.21 10:25, Peter Eisentraut wrote:\n> \n> On 20.07.21 13:57, Peter Eisentraut wrote:\n>>>> Perhaps we should change the script or Makefile so that it doesn't \n>>>> create all the maps in one go?\n>>>\n>>> I agree, either comment it better or just write one file at a time. \n>>> I'll take another look at that.\n>>\n>> Here is a patch that does it one file (pair) at a time.  The other \n>> rules besides UCS_to_most.pl actually had the same problem, since they \n>> produce two output files, so running in parallel called each script \n>> twice.  In this patch, all of that is heavily refactored and works \n>> correctly now. 
Note that UCS_to_most.pl already accepted a \n>> command-line argument to specify which encoding to work with.\n> \n> Here is an updated patch with a thinko fix that made the previous patch \n> not actually work.\n\nI have committed this one and closed the CF entry.\n\n\n", "msg_date": "Mon, 4 Oct 2021 20:36:32 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Re: improvements in Unicode tables generation code" } ]
[ { "msg_contents": "Hi,\n\nHere's a curious one-off failure in test_integerset:\n\n+ERROR: iterate returned wrong value; got 519985430528, expected 485625692160\n\nhttps://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=rhinoceros&dt=2021-04-01%2018:19:47\n\n\n", "msg_date": "Tue, 22 Jun 2021 20:53:59 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": true, "msg_subject": "Cosmic ray hits integerset" }, { "msg_contents": "On 2021-Jun-22, Thomas Munro wrote:\n\n> Hi,\n> \n> Here's a curious one-off failure in test_integerset:\n> \n> +ERROR: iterate returned wrong value; got 519985430528, expected 485625692160\n\nCosmic rays indeed. The base-2 representation of the expected value is\n111000100010001100011000000000000000000\nand that of the actual value is\n111100100010001100011000000000000000000\n\nThere's a single bit of difference.\n\n-- \n�lvaro Herrera Valdivia, Chile\n\"No hay hombre que no aspire a la plenitud, es decir,\nla suma de experiencias de que un hombre es capaz\"\n\n\n", "msg_date": "Tue, 22 Jun 2021 10:21:19 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Cosmic ray hits integerset" }, { "msg_contents": "\n\n> 22 июня 2021 г., в 19:21, Alvaro Herrera <alvherre@alvh.no-ip.org> написал(а):\n> \n> On 2021-Jun-22, Thomas Munro wrote:\n> \n>> Hi,\n>> \n>> Here's a curious one-off failure in test_integerset:\n>> \n>> +ERROR: iterate returned wrong value; got 519985430528, expected 485625692160\n> \n> Cosmic rays indeed. The base-2 representation of the expected value is\n> 111000100010001100011000000000000000000\n> and that of the actual value is\n> 111100100010001100011000000000000000000\n> \n> There's a single bit of difference.\n\nI've tried to explain this as not a single-event upset, but integer overflow in 30-bits mode of simple8b somewhere. But found nothing so far. 
Actual error is in bit 35, and next mode is 60-bit mode.\n\nLooks like cosmic ray to me too.\n\nBest regards, Andrey Borodin.\n\n", "msg_date": "Tue, 22 Jun 2021 19:40:26 +0500", "msg_from": "Andrey Borodin <x4mmm@yandex-team.ru>", "msg_from_op": false, "msg_subject": "Re: Cosmic ray hits integerset" }, { "msg_contents": "Hi, Asking out of pure technical curiosity about \"the rhinoceros\" - what kind of animal is it ? Physical box or VM? How one could get dmidecode(1) / dmesg(1) / mcelog (1) from what's out there (e.g. does it run ECC or not ?)\n\n-J.\n\n> -----Original Message-----\n> From: Alvaro Herrera <alvherre@alvh.no-ip.org>\n> Sent: Tuesday, June 22, 2021 4:21 PM\n> To: Thomas Munro <thomas.munro@gmail.com>\n> Cc: pgsql-hackers <pgsql-hackers@postgresql.org>\n> Subject: Re: Cosmic ray hits integerset\n> \n> On 2021-Jun-22, Thomas Munro wrote:\n> \n> > Hi,\n> >\n> > Here's a curious one-off failure in test_integerset:\n> >\n> > +ERROR: iterate returned wrong value; got 519985430528, expected\n> > +485625692160\n> \n> Cosmic rays indeed. The base-2 representation of the expected value is\n> 111000100010001100011000000000000000000\n> and that of the actual value is\n> 111100100010001100011000000000000000000\n> \n> There's a single bit of difference.\n\n\n\n", "msg_date": "Wed, 7 Jul 2021 06:53:51 +0000", "msg_from": "Jakub Wartak <Jakub.Wartak@tomtom.com>", "msg_from_op": false, "msg_subject": "RE: Cosmic ray hits integerset" }, { "msg_contents": "On 7/7/21 2:53 AM, Jakub Wartak wrote:\n> Hi, Asking out of pure technical curiosity about \"the rhinoceros\" - what kind of animal is it ? Physical box or VM? How one could get dmidecode(1) / dmesg(1) / mcelog (1) from what's out there (e.g. does it run ECC or not ?)\n\n\nRhinoceros is just a VM on a simple desktop machine. 
Nothing fancy.\n\nJoe\n\n-- \nCrunchy Data - http://crunchydata.com\nPostgreSQL Support for Secure Enterprises\nConsulting, Training, & Open Source Development\n\n\n", "msg_date": "Wed, 7 Jul 2021 08:14:03 -0400", "msg_from": "Joe Conway <mail@joeconway.com>", "msg_from_op": false, "msg_subject": "Re: Cosmic ray hits integerset" }, { "msg_contents": "Fwiw, yes it could be a cosmic ray.\n\nIt could also just be marginally bad ram. Bad ram is notoriously hard\nto reliably test for. It can be very sensitive to the exact bit\npattern stored in it, the timing of reads and writes, and other\nfactors. The whole point of the rowhammer attacks is to push some of\nthose timing factors hard but the same failures can happen randomly.\n\nOn Wed, 7 Jul 2021 at 08:14, Joe Conway <mail@joeconway.com> wrote:\n>\n> On 7/7/21 2:53 AM, Jakub Wartak wrote:\n> > Hi, Asking out of pure technical curiosity about \"the rhinoceros\" - what kind of animal is it ? Physical box or VM? How one could get dmidecode(1) / dmesg(1) / mcelog (1) from what's out there (e.g. does it run ECC or not ?)\n>\n>\n> Rhinoceros is just a VM on a simple desktop machine. Nothing fancy.\n>\n> Joe\n>\n> --\n> Crunchy Data - http://crunchydata.com\n> PostgreSQL Support for Secure Enterprises\n> Consulting, Training, & Open Source Development\n>\n>\n\n\n-- \ngreg\n\n\n", "msg_date": "Sun, 11 Jul 2021 22:09:53 -0400", "msg_from": "Greg Stark <stark@mit.edu>", "msg_from_op": false, "msg_subject": "Re: Cosmic ray hits integerset" } ]
[ { "msg_contents": "> But making everything slower will be a hard sell, because vast majority of\r\n> workloads already running on Postgres don't have this issue at all, so\r\n> for them it's not worth the expense. Following the insurance analogy,\r\n> selling tornado insurance in Europe is mostly pointless.\r\n>\r\n\r\nAgree. I've been surprised about NOT hearing complaints from PostgreSQL\r\ncustomers about a particular \"bad\" plan choice that was common in other\r\nrdbms products where large, complex queries were the norm. The situation\r\noccurs late in a plan with many joins where a hash join can be used and \r\nwhere either side is estimated to fit in memory. On one side is a base table \r\nwith cardinality that we have statistics for, while the other side has an\r\nestimated cardinality that is the result of many estimates each of which\r\nhas error that can compound, and that in some cases amounts to a wild guess.\r\n(e.g. what is the selectivity of SUM(x) < 12 ?). If the planner's point estimate \r\nof cardinality is such that both sides could fit in memory, then a bad plan can\r\neasily be made. As Peter said, [ most ] humans have no trouble dealing with \r\nthese kind of situations. They take the risk of being wrong into account.\r\n\r\nSo in our world, the useful numbers are 0, 1, measured N, and estimated N,\r\nbut we don't distinguish between measured N and estimated N.\r\n\r\nBut that doesn't mean that OLTP customers would be willing to accept\r\nslightly suboptimal plans to mitigate a risk they don't experience.\r\n\r\n> Insurance is also about personal preference / risk tolerance. Maybe I'm\r\n> fine with accepting risk that my house burns down, or whatever ...\r\n\r\nRight, and that's why the problem mentioned above is still out there\r\nannoying customers who have complex plans. To them it looks like\r\nan obviously bad plan choice.\r\n\r\nSomething that might help is to have the planner cost be a structure instead\r\nof a number. 
Costs of plans that are deemed \"risky\" are accumulated \r\nseparately from plans that make no risky choices, and for a given set\r\nof join items you keep the minimum cost plan of both types. It may happen that all\r\nplans eventually make a risky choice, in which case you take the plan with the minimum\r\ncost, but if there exists a plan with no risky choices, then the minimum cost\r\nplan with no risky choices is chosen, with a GUC that enables a customer to ignore\r\nrisk when making this choice. This is not in the spirit of the hoped for simple heuristic,\r\nand it would be heuristic in its classification of plans that are risky, but in the NLJ case \r\nthe cost of an unparameterized NLJ could be deemed risky if the cardinality of the inner \r\nrelation is not 0, 1, or measured N.\r\n\r\n\r\n\r\n\r\n", "msg_date": "Tue, 22 Jun 2021 11:10:28 +0000", "msg_from": "\"Finnerty, Jim\" <jfinnert@amazon.com>", "msg_from_op": true, "msg_subject": "Re:disfavoring unparameterized nested loops" } ]
[ { "msg_contents": "On Mon, Jun 21, 2021 at 04:19:27PM -0700, Jim Nasby wrote:\n> The following generates an assertion failure. Quick testing with start and\n> stop as well as the core dump shows it’s failing on the execution of\n> `schema_name := schema_name(i)` immediately after COMMIT, because there’s\nno\n> active snapshot. On a build without asserts I get a failure in\n> GetActiveSnapshot() (second stack trace). This works fine on 12_STABLE,\nbut\n> fails on 13_STABLE and HEAD.\n\nFor me it's a typo.\nneed_snapshot = (expr->expr_simple_mutable || !estate->readonly_func);\n\nHEAD with no assertion:\n\n CREATE OR REPLACE FUNCTION public.schema_name(i integer)\npostgres-# RETURNS text\npostgres-# LANGUAGE sql\npostgres-# IMMUTABLE\npostgres-# AS $function$\npostgres$# SELECT 'test_' || trim(to_char(i, '000000'))\npostgres$# $function$;\nCREATE FUNCTION\npostgres=# CREATE OR REPLACE PROCEDURE public.build_schema(start integer,\nstop\npostgres(# integer, commit_interval integer DEFAULT 10, do_insert boolean\nDEFAULT true)\npostgres-# LANGUAGE plpgsql\npostgres-# AS $procedure$\npostgres$# DECLARE\npostgres$# schema_name text;\npostgres$# BEGIN\npostgres$# FOR i IN start .. 
stop LOOP\npostgres$# schema_name := schema_name(i);\npostgres$# IF i % commit_interval = 0 THEN\npostgres$# --RAISE NOTICE 'COMMIT CREATE step %', i;\npostgres$# COMMIT;\npostgres$# END IF;\npostgres$# END LOOP;\npostgres$# END$procedure$;\nCREATE PROCEDURE\npostgres=# CALL build_schema(1,11);\nCALL\npostgres=# CALL build_schema(1,11);\nCALL\npostgres=# CALL build_schema(1,11);\nCALL\n\nThe comments in the function are clear:\nIf expression is mutable OR is a non-read-only function, so need a snapshot.\n\nCan you test please?\n\nregards,\nRanier Vilela", "msg_date": "Tue, 22 Jun 2021 09:56:09 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "On Tue, Jun 22, 2021 at 10:56 PM Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> On Mon, Jun 21, 2021 at 04:19:27PM -0700, Jim Nasby wrote:\n> > The following generates an assertion failure. Quick testing with start and\n> > stop as well as the core dump shows it’s failing on the execution of\n> > `schema_name := schema_name(i)` immediately after COMMIT, because there’s no\n> > active snapshot. On a build without asserts I get a failure in\n> > GetActiveSnapshot() (second stack trace). This works fine on 12_STABLE, but\n> > fails on 13_STABLE and HEAD.\n>\n> For me it's a typo.\n> need_snapshot = (expr->expr_simple_mutable || !estate->readonly_func);\n>\n...\n>\n> The comments in the function are clear:\n> If expression is mutable OR is a non-read-only function, so need a snapshot.\n>\n\nI have to agree with you.\nLooks like the \"&&\" should really be an \"||\". The explanation in the\ncode comment is pretty clear on this, as you say.\n\nI was able to reproduce the problem using your example. 
It produced a\ncoredump, pointing to the failed \"Assert(ActiveSnapshotSet());\" in\npg_plan_query().\nI also verified that your patch seemed to fix the problem.\n\nHowever, I found that this issue is masked by the following recent commit:\n\ncommit d102aafb6259a6a412803d4b1d8c4f00aa17f67e\nAuthor: Tom Lane <tgl@sss.pgh.pa.us>\nDate: Tue Jun 22 17:48:39 2021 -0400\n Restore the portal-level snapshot for simple expressions, too.\n\nWith this commit in place, there is an active snapshot in place\nanyway, so for your example, that Assert no longer fires as it did\nbefore.\nHowever, I still think that your fix is valid and needed.\n\nRegards,\nGreg Nancarrow\nFujitsu Australia\n\n\n", "msg_date": "Wed, 23 Jun 2021 16:04:12 +1000", "msg_from": "Greg Nancarrow <gregn4422@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "Em qua., 23 de jun. de 2021 às 03:04, Greg Nancarrow <gregn4422@gmail.com>\nescreveu:\n\n> On Tue, Jun 22, 2021 at 10:56 PM Ranier Vilela <ranier.vf@gmail.com>\n> wrote:\n> >\n> > On Mon, Jun 21, 2021 at 04:19:27PM -0700, Jim Nasby wrote:\n> > > The following generates an assertion failure. Quick testing with start\n> and\n> > > stop as well as the core dump shows it’s failing on the execution of\n> > > `schema_name := schema_name(i)` immediately after COMMIT, because\n> there’s no\n> > > active snapshot. On a build without asserts I get a failure in\n> > > GetActiveSnapshot() (second stack trace). This works fine on\n> 12_STABLE, but\n> > > fails on 13_STABLE and HEAD.\n> >\n> > For me it's a typo.\n> > need_snapshot = (expr->expr_simple_mutable || !estate->readonly_func);\n> >\n> ...\n> >\n> > The comments in the function are clear:\n> > If expression is mutable OR is a non-read-only function, so need a\n> snapshot.\n> >\n>\n> I have to agree with you.\n> Looks like the \"&&\" should really be an \"||\". 
The explanation in the\n> code comment is pretty clear on this, as you say.\n>\n\n> I was able to reproduce the problem using your example. It produced a\n> coredump, pointing to the failed \"Assert(ActiveSnapshotSet());\" in\n> pg_plan_query().\n>\nYes before d102aafb6, Jim Nasby example fires a coredump.\n\nI also verified that your patch seemed to fix the problem.\n>\nBoth fix the Jim example.\n\n\n> However, I found that this issue is masked by the following recent commit:\n>\n> commit d102aafb6259a6a412803d4b1d8c4f00aa17f67e\n> Author: Tom Lane <tgl@sss.pgh.pa.us>\n> Date: Tue Jun 22 17:48:39 2021 -0400\n> Restore the portal-level snapshot for simple expressions, too.\n>\n> With this commit in place, there is an active snapshot in place\n> anyway, so for your example, that Assert no longer fires as it did\n> before.\n> However, I still think that your fix is valid and needed.\n>\nI agreed.\nBefore 84f5c29, only the not-read-only function NOT push a new snapshot.\nNow only mutable expression AND not-read-only function, pushes a new\nsnapshot.\nUnder which conditions did Jim's example not fit?\n\nWith && is very restricted.\nWe have:\n1. Mutable expression AND not-read-only function -> push a new snapshot\n2. Mutable expression AND read-only-function -> not push a new snapshot\n3. Not mutable expression AND not-read-only function -> not push a new\nsnapshot\n4. Not mutable expression AND read-only function -> not push a new snapshot\n\nWe agree that 2 and 3 should push a new snapshot.\n\nIf the user function is declared as not-read-only, even though read-only,\nit's a failure to be fixed either by the user, or by the parser, not here.\n\nregards,\nRanier Vilela\n\nEm qua., 23 de jun. de 2021 às 03:04, Greg Nancarrow <gregn4422@gmail.com> escreveu:On Tue, Jun 22, 2021 at 10:56 PM Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> On Mon, Jun 21, 2021 at 04:19:27PM -0700, Jim Nasby wrote:\n> > The following generates an assertion failure. 
Quick testing with start and\n> > stop as well as the core dump shows it’s failing on the execution of\n> > `schema_name := schema_name(i)` immediately after COMMIT, because there’s no\n> > active snapshot. On a build without asserts I get a failure in\n> > GetActiveSnapshot() (second stack trace). This works fine on 12_STABLE, but\n> > fails on 13_STABLE and HEAD.\n>\n> For me it's a typo.\n> need_snapshot = (expr->expr_simple_mutable || !estate->readonly_func);\n>\n...\n>\n> The comments in the function are clear:\n> If expression is mutable OR is a non-read-only function, so need a snapshot.\n>\n\nI have to agree with you.\nLooks like the \"&&\" should really be an \"||\". The explanation in the\ncode comment is pretty clear on this, as you say.\n\nI was able to reproduce the problem using your example. It produced a\ncoredump, pointing to the failed \"Assert(ActiveSnapshotSet());\" in\npg_plan_query().Yes before  \nd102aafb6, Jim Nasby example fires a coredump.\n\nI also verified that your patch seemed to fix the problem.Both fix the Jim example. \n\nHowever, I found that this issue is masked by the following recent commit:\n\ncommit d102aafb6259a6a412803d4b1d8c4f00aa17f67e\nAuthor: Tom Lane <tgl@sss.pgh.pa.us>\nDate:   Tue Jun 22 17:48:39 2021 -0400\n    Restore the portal-level snapshot for simple expressions, too.\n\nWith this commit in place, there is an active snapshot in place\nanyway, so for your example, that Assert no longer fires as it did\nbefore.\nHowever, I still think that your fix is valid and needed.I agreed.Before 84f5c29, only the not-read-only function NOT push a new snapshot.Now only mutable expression AND not-read-only function, pushes a new snapshot.Under which conditions did Jim's example not fit?With && is very restricted.We have:1. Mutable expression AND not-read-only function -> push a new snapshot2. Mutable expression AND read-only-function -> not push a new snapshot3. 
Not mutable expression AND not-read-only function -> not push a new snapshot4. Not mutable expression AND read-only function -> not push a new snapshotWe agree that 2 and 3 should push a new snapshot.If the user function is declared as not-read-only, even though read-only,it's a failure to be fixed either by the user, or by the parser, not here. regards,Ranier Vilela", "msg_date": "Wed, 23 Jun 2021 08:56:06 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "Greg Nancarrow <gregn4422@gmail.com> writes:\n> On Tue, Jun 22, 2021 at 10:56 PM Ranier Vilela <ranier.vf@gmail.com> wrote:\n>> The comments in the function are clear:\n>> If expression is mutable OR is a non-read-only function, so need a snapshot.\n\n> I have to agree with you.\n> Looks like the \"&&\" should really be an \"||\". The explanation in the\n> code comment is pretty clear on this, as you say.\n\nThe code is correct as-is; the proposed change would result in taking\nmore snapshots than needed. Perhaps the comment needs revision, since\nyou both misread it. The comment is written in terms of \"when can we\nskip taking a snapshot\", while the test in the code is written for\nthe inverse condition \"when do we need a snapshot\".\n\n> I also verified that your patch seemed to fix the problem.\n\nIt accidentally masked it ... but only partially.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 23 Jun 2021 09:01:55 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "On Wed, Jun 23, 2021 at 11:01 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n>\n> The code is correct as-is; the proposed change would result in taking\n> more snapshots than needed. Perhaps the comment needs revision, since\n> you both misread it. 
The comment is written in terms of \"when can we\n> skip taking a snapshot\", while the test in the code is written for\n> the inverse condition \"when do we need a snapshot\".\n\nYes, you're right.\nEven though I did realise that the comment was talking about the\ninverse, the condition for needing a snapshot still seemed too narrow,\nbased on the comment, but checking the cases again, it is correct.\n\nPerhaps that code could have been written as the following, to better\nalign with the comments:\n\n skip_snapshot = (!expr->expr_simple_mutable || estate->readonly_func);\n if (!skip_snapshot)\n {\n ...\n }\n\n ...\n\n if (!skip_snapshot)\n PopActiveSnapshot();\n\n\nRegards,\nGreg Nancarrow\nFujitsu Australia\n\n\n", "msg_date": "Thu, 24 Jun 2021 10:45:33 +1000", "msg_from": "Greg Nancarrow <gregn4422@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "Greg Nancarrow <gregn4422@gmail.com> writes:\n> On Wed, Jun 23, 2021 at 11:01 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> The comment is written in terms of \"when can we\n>> skip taking a snapshot\", while the test in the code is written for\n>> the inverse condition \"when do we need a snapshot\".\n\n> Perhaps that code could have been written as the following, to better\n> align with the comments:\n> [ invert the variable's meaning ]\n\nYeah, perhaps. I remember feeling that the code was clearer this\nway (because \"if (!skip_snapshot)\" seems a little backwards).\nBut it might be better to make the code fit the comment than to\ntry to invert the description in the comment.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 23 Jun 2021 20:51:05 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "Em qua., 23 de jun. 
de 2021 às 21:45, Greg Nancarrow <gregn4422@gmail.com>\nescreveu:\n\n> On Wed, Jun 23, 2021 at 11:01 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >\n> >\n> > The code is correct as-is; the proposed change would result in taking\n> > more snapshots than needed. Perhaps the comment needs revision, since\n> > you both misread it. The comment is written in terms of \"when can we\n> > skip taking a snapshot\", while the test in the code is written for\n> > the inverse condition \"when do we need a snapshot\".\n>\n> Yes, you're right.\n> Even though I did realise that the comment was talking about the\n> inverse, the condition for needing a snapshot still seemed too narrow,\n> based on the comment, but checking the cases again, it is correct.\n>\nI still don't agree. But we leave the code like that, to see how it\nbehaves.\n\n\n> Perhaps that code could have been written as the following, to better\n> align with the comments:\n>\n> skip_snapshot = (!expr->expr_simple_mutable || estate->readonly_func);\n> if (!skip_snapshot)\n> {\n> ...\n> }\n>\n> ...\n>\n> if (!skip_snapshot)\n> PopActiveSnapshot();\n>\n-1. That way it's readability is much worse, more complicated and IMHO it\ngenerates worse asm.\n\nregards,\nRanier Vilela\n\nEm qua., 23 de jun. de 2021 às 21:45, Greg Nancarrow <gregn4422@gmail.com> escreveu:On Wed, Jun 23, 2021 at 11:01 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n>\n> The code is correct as-is; the proposed change would result in taking\n> more snapshots than needed.  Perhaps the comment needs revision, since\n> you both misread it.  The comment is written in terms of \"when can we\n> skip taking a snapshot\", while the test in the code is written for\n> the inverse condition \"when do we need a snapshot\".\n\nYes, you're right.\nEven though I did realise that the comment was talking about the\ninverse, the condition for needing a snapshot still seemed too narrow,\nbased on the comment, but checking the cases again, it is correct.\nI still don't agree. 
But we leave the code like that, to see how it behaves.  \n\nPerhaps that code could have been written as the following, to better\nalign with the comments:\n\n    skip_snapshot = (!expr->expr_simple_mutable || estate->readonly_func);\n    if (!skip_snapshot)\n    {\n        ...\n    }\n\n    ...\n\n    if (!skip_snapshot)\n        PopActiveSnapshot();\n-1. That way it's readability is much worse, more complicated and IMHO it generates worse asm. regards,Ranier Vilela", "msg_date": "Thu, 24 Jun 2021 13:02:49 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" }, { "msg_contents": "Em qua., 23 de jun. de 2021 às 21:51, Tom Lane <tgl@sss.pgh.pa.us> escreveu:\n\n> Greg Nancarrow <gregn4422@gmail.com> writes:\n> > On Wed, Jun 23, 2021 at 11:01 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> The comment is written in terms of \"when can we\n> >> skip taking a snapshot\", while the test in the code is written for\n> >> the inverse condition \"when do we need a snapshot\".\n>\n> > Perhaps that code could have been written as the following, to better\n> > align with the comments:\n> > [ invert the variable's meaning ]\n>\n> Yeah, perhaps. I remember feeling that the code was clearer this\n> way (because \"if (!skip_snapshot)\" seems a little backwards).\n> But it might be better to make the code fit the comment than to\n> try to invert the description in the comment.\n>\nI'm not a native speaker, so I would be of little help with clearer and\nmore elusive comments.\nIf you both agree that the current code is correct, please correct the\ncomments.\nThe current code is much simpler and readable.\n\nregards,\nRanier Vilela\n\nEm qua., 23 de jun. 
de 2021 às 21:51, Tom Lane <tgl@sss.pgh.pa.us> escreveu:Greg Nancarrow <gregn4422@gmail.com> writes:\n> On Wed, Jun 23, 2021 at 11:01 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> The comment is written in terms of \"when can we\n>> skip taking a snapshot\", while the test in the code is written for\n>> the inverse condition \"when do we need a snapshot\".\n\n> Perhaps that code could have been written as the following, to better\n> align with the comments:\n> [ invert the variable's meaning ]\n\nYeah, perhaps.  I remember feeling that the code was clearer this\nway (because \"if (!skip_snapshot)\" seems a little backwards).\nBut it might be better to make the code fit the comment than to\ntry to invert the description in the comment.\nI'm not a native speaker, so I would be of little help with clearer and more elusive comments.If you both agree that the current code is correct, please correct the comments.The current code is much simpler and readable.regards,Ranier Vilela", "msg_date": "Thu, 24 Jun 2021 13:05:12 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Assertion failure in HEAD and 13 after calling COMMIT in a stored\n proc" } ]
[ { "msg_contents": "I think we can benefit from higher level operator classes which can\nsupport multiple index implementations. This is achievable by\nintroducing another type of access method. Here is my idea in SQL:\n\nCREATE ACCESS METHOD ordering\nTYPE INTERFACE HANDLER ordering_ifam_handler;\n\nCREATE ACCESS METHOD btree\nTYPE INDEX HANDLER bthandler\nIMPLEMENTS (ordering);\n\nCREATE OPERATOR CLASS btree_int_ops\nFOR TYPE int USING ordering AS\nFUNCTION 1 btint4cmp(int, int),\nFUNCTION 3 =(int, int);\n\nThis can make it easier to develop both new index access methods and\ndata types. The extensions can provide them without depending on each\nother.\n\nThe initial implementation is attached. I wrote it only to ask for\nfeedback. I am happy to work on the missing pieces if the community\nsupports the idea.\n\nI suggest the initial version to come with 2 new access methods in the\nnew type: hashing and ordering. We can use those in the functions\nthat are currently searching for the hash and btree operator classes.\n\nLater, I want to work on developing another access method for\ncontainment. It can support high level operator classes with only SQL\ncallable functions. GiST, SP-GiST, and BRIN can implement this. We\ncan allow the new implementations together with the existing ones.", "msg_date": "Tue, 22 Jun 2021 19:52:23 +0300", "msg_from": "Emre Hasegeli <emre@hasegeli.com>", "msg_from_op": true, "msg_subject": "Decouple operator classes from index access methods" }, { "msg_contents": "Emre Hasegeli <emre@hasegeli.com> writes:\n> I think we can benefit from higher level operator classes which can\n> support multiple index implementations. 
This is achievable by\n> introducing another type of access method.\n\nI do not really understand what the point of that is?\n\nTo the extent that operator classes have any meaning at all apart\nfrom the associated index AMs, ISTM it's that they embody specific\nwell-known semantics, as btree and hash opclasses do for sorting\nand hashing respectively. I'm not clear on what a multi-AM\nopclass notion would do for us.\n\n> I suggest the initial version to come with 2 new access methods in the\n> new type: hashing and ordering. We can use those in the functions\n> that are currently searching for the hash and btree operator classes.\n\nAgain, exactly what does that buy us, other than more complication\nand overhead?\n\nI can see some value perhaps in letting other opclasses refer to\nbtree and hash opclasses rather than duplicating their entries.\nBut that doesn't seem to be what you're proposing here.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 22 Jun 2021 13:05:52 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Decouple operator classes from index access methods" }, { "msg_contents": "> I can see some value perhaps in letting other opclasses refer to\n> btree and hash opclasses rather than duplicating their entries.\n> But that doesn't seem to be what you're proposing here.\n\nThat's what I am proposing. My idea is to change the current btree\nand hash opclasses to be under the new proposed access methods so\nbtree, hash, and any other index access method can refer to them.\n\n\n", "msg_date": "Tue, 22 Jun 2021 20:21:28 +0300", "msg_from": "Emre Hasegeli <emre@hasegeli.com>", "msg_from_op": true, "msg_subject": "Re: Decouple operator classes from index access methods" }, { "msg_contents": "On Tue, Jun 22, 2021 at 8:06 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > I suggest the initial version to come with 2 new access methods in the\n> > new type: hashing and ordering. 
We can use those in the functions\n> > that are currently searching for the hash and btree operator classes.\n>\n> Again, exactly what does that buy us, other than more complication\n> and overhead?\n>\n> I can see some value perhaps in letting other opclasses refer to\n> btree and hash opclasses rather than duplicating their entries.\n> But that doesn't seem to be what you're proposing here.\n\nIn future we could have, for instance, LSM or in-memory B-tree or\nother index AM, which could use existing B-tree or hash opclasses.\n\nBut even now, we could use this decoupling to get rid of ugly\nbtree_gist and btree_gin. And also solve the extensibility problem\nhere. If an extension provides datatype with B-tree opclass, we\ncurrently can't directly use it with GiST and GIN. So, in order to\nprovide B-tree-like indexing for GiST and GIN, an extension needs to\nexplicitly define GiST and GIN B-tree-like opclasses.\n\n From my point of view, we can consider a decoupling patch if it will\ncome with an ability to use B-tree opclasses directly in GiST and GIN.\n\n------\nRegards,\nAlexander Korotkov\n\n\n", "msg_date": "Tue, 22 Jun 2021 23:42:47 +0300", "msg_from": "Alexander Korotkov <aekorotkov@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Decouple operator classes from index access methods" }, { "msg_contents": "> In future we could have, for instance, LSM or in-memory B-tree or\n> other index AM, which could use existing B-tree or hash opclasses.\n\nThis would be easily possible with my patch:\n\nCREATE ACCESS METHOD inmemorybtree\nTYPE INDEX HANDLER imbthandler\nIMPLEMENTS (ordering);\n\n> But even now, we could use this decoupling to get rid of ugly\n> btree_gist and btree_gin. And also solve the extensibility problem\n> here. If an extension provides datatype with B-tree opclass, we\n> currently can't directly use it with GiST and GIN. 
So, in order to\n> provide B-tree-like indexing for GiST and GIN, an extension needs to\n> explicitly define GiST and GIN B-tree-like opclasses.\n\nThis would also be possible if we move btree_gist and btree_gin\nsupport functions inside gist and gin access methods. The access\nmethod support functions get the operator family. They can find which\naccess method this operator family belongs to, and call the\nappropriate functions if it is \"ordering\".\n\n\n", "msg_date": "Fri, 25 Jun 2021 12:17:34 +0300", "msg_from": "Emre Hasegeli <emre@hasegeli.com>", "msg_from_op": true, "msg_subject": "Re: Decouple operator classes from index access methods" }, { "msg_contents": "On Fri, Jun 25, 2021 at 12:18 PM Emre Hasegeli <emre@hasegeli.com> wrote:\n> > In future we could have, for instance, LSM or in-memory B-tree or\n> > other index AM, which could use existing B-tree or hash opclasses.\n>\n> This would be easily possible with my patch:\n>\n> CREATE ACCESS METHOD inmemorybtree\n> TYPE INDEX HANDLER imbthandler\n> IMPLEMENTS (ordering);\n>\n> > But even now, we could use this decoupling to get rid of ugly\n> > btree_gist and btree_gin. And also solve the extensibility problem\n> > here. If an extension provides datatype with B-tree opclass, we\n> > currently can't directly use it with GiST and GIN. So, in order to\n> > provide B-tree-like indexing for GiST and GIN, an extension needs to\n> > explicitly define GiST and GIN B-tree-like opclasses.\n>\n> This would also be possible if we move btree_gist and btree_gin\n> support functions inside gist and gin access methods. The access\n> method support functions get the operator family. They can find which\n> access method this operator family belongs to, and call the\n> appropriate functions if it is \"ordering\".\n\nYes, that's it. 
That's quite an amount of work, but I think this\nwould be a great illustration of the advantages of this decoupling.\n\n------\nRegards,\nAlexander Korotkov\n\n\n", "msg_date": "Fri, 25 Jun 2021 23:42:39 +0300", "msg_from": "Alexander Korotkov <aekorotkov@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Decouple operator classes from index access methods" } ]
[ { "msg_contents": "When running a VACUUM or CLUSTER command, the namespace name is not part of\nthe emitted message.\n\nUsing `vacuumdb` CLI tool recently with multiple jobs, I found that\nreading the output messages harder to match the relations with their\nnamespaces.\n\nExample:\n\nINFO: vacuuming \"sendgrid.open\"\nINFO: vacuuming \"mailgun.open\"\n...\nINFO: \"open\": found 0 removable, 31460776 nonremovable row versions in\n1358656 pages\nDETAIL: 0 dead row versions cannot be removed yet.\nCPU 31.35s/261.26u sec elapsed 1620.68 sec.\n...\n\nIn this example. the user can't readily tell which `open` relation was\ncompleted.\n\nAttached is a patch using existing functions to include the namespace in\nthe output string.\n\nLooking forward to feedback!\n-Mike Fiedler", "msg_date": "Tue, 22 Jun 2021 18:07:53 -0400", "msg_from": "Mike <miketheman@gmail.com>", "msg_from_op": true, "msg_subject": "Fwd: Emit namespace in post-copy output" }, { "msg_contents": "On Tue, Jun 22, 2021 at 6:08 PM Mike <miketheman@gmail.com> wrote:\n\n> When running a VACUUM or CLUSTER command, the namespace name is not part\n> of the emitted message.\n>\n> Using `vacuumdb` CLI tool recently with multiple jobs, I found that\n> reading the output messages harder to match the relations with their\n> namespaces.\n>\n> Example:\n>\n> INFO: vacuuming \"sendgrid.open\"\n> INFO: vacuuming \"mailgun.open\"\n> ...\n> INFO: \"open\": found 0 removable, 31460776 nonremovable row versions in\n> 1358656 pages\n> DETAIL: 0 dead row versions cannot be removed yet.\n> CPU 31.35s/261.26u sec elapsed 1620.68 sec.\n> ...\n>\n> In this example. 
the user can't readily tell which `open` relation was\n> completed.\n>\n> Attached is a patch using existing functions to include the namespace in\n> the output string.\n>\n> Looking forward to feedback!\n> -Mike Fiedler\n>\n\nI've added this to the open commitfest:\nhttps://commitfest.postgresql.org/33/3200/\n\nThe change is quite simple, just 3 lines, adding the schema name to two\ndifferent lines of output.\n\nAs such, there is no obvious documentation to change, though I can imagine\nthat we have sample output from vacuum, vacuumdb or cluster somewhere that\nwould need to be updated.\n\nI cobbled together a very simple test:\n\n~/pgdata$ /usr/local/pgsql/bin/psql postgres\npsql (14beta2)\nType \"help\" for help.\npostgres=# create database mike_test;\nCREATE DATABASE\npostgres=# \\c mike_test\nYou are now connected to database \"mike_test\" as user \"corey\".\nmike_test=# create schema foo;\nCREATE SCHEMA\nmike_test=# create table foo.bar(x integer);\nCREATE TABLE\nmike_test=# \\q\nmike_test=# VACUUM FULL VERBOSE foo.bar;\nINFO: vacuuming \"foo.bar\"\nINFO: \"foo.bar\": found 0 removable, 0 nonremovable row versions in 0 pages\nDETAIL: 0 dead row versions cannot be removed yet.\nCPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s.\nVACUUM\n\n\nAnd of course vacuumdb\n\n~/pgdata$ /usr/local/pgsql/bin/vacuumdb --full --verbose mike_test\n--table=foo.bar\nvacuumdb: vacuuming database \"mike_test\"\nINFO: vacuuming \"foo.bar\"\nINFO: \"foo.bar\": found 0 removable, 0 nonremovable row versions in 0 pages\nDETAIL: 0 dead row versions cannot be removed yet.\nCPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s.\n\n\n So far, so good.\n\nOn Tue, Jun 22, 2021 at 6:08 PM Mike <miketheman@gmail.com> wrote:When running a VACUUM or CLUSTER command, the namespace name is not part of the emitted message.Using `vacuumdb` CLI tool recently with multiple jobs, I found that reading the output messages harder to match the relations with their namespaces.Example:INFO:  vacuuming 
\"sendgrid.open\"INFO:  vacuuming \"mailgun.open\"...INFO:  \"open\": found 0 removable, 31460776 nonremovable row versions in 1358656 pagesDETAIL:  0 dead row versions cannot be removed yet.CPU 31.35s/261.26u sec elapsed 1620.68 sec....In this example. the user can't readily tell which `open` relation was completed.Attached is a patch using existing functions to include the namespace in the output string.Looking forward to feedback!-Mike FiedlerI've added this to the open commitfest: https://commitfest.postgresql.org/33/3200/The change is quite simple, just 3 lines, adding the schema name to two different lines of output.As such, there is no obvious documentation to change, though I can imagine that we have sample output from vacuum, vacuumdb or cluster somewhere that would need to be updated.I cobbled together a very simple test:~/pgdata$ /usr/local/pgsql/bin/psql postgrespsql (14beta2)Type \"help\" for help.postgres=# create database mike_test;CREATE DATABASEpostgres=# \\c mike_test You are now connected to database \"mike_test\" as user \"corey\".mike_test=# create schema foo;CREATE SCHEMAmike_test=# create table foo.bar(x integer);CREATE TABLEmike_test=# \\qmike_test=# VACUUM FULL VERBOSE foo.bar;INFO:  vacuuming \"foo.bar\"INFO:  \"foo.bar\": found 0 removable, 0 nonremovable row versions in 0 pagesDETAIL:  0 dead row versions cannot be removed yet.CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s.VACUUMAnd of course vacuumdb~/pgdata$ /usr/local/pgsql/bin/vacuumdb --full --verbose mike_test --table=foo.barvacuumdb: vacuuming database \"mike_test\"INFO:  vacuuming \"foo.bar\"INFO:  \"foo.bar\": found 0 removable, 0 nonremovable row versions in 0 pagesDETAIL:  0 dead row versions cannot be removed yet.CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s. 
So far, so good.", "msg_date": "Wed, 23 Jun 2021 17:46:10 -0400", "msg_from": "Corey Huinker <corey.huinker@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Emit namespace in post-copy output" }, { "msg_contents": "Awesome, thanks! Are there any other steps I should take?\n\nOn Wed, Jun 23, 2021 at 5:46 PM Corey Huinker <corey.huinker@gmail.com>\nwrote:\n\n> On Tue, Jun 22, 2021 at 6:08 PM Mike <miketheman@gmail.com> wrote:\n>\n>> When running a VACUUM or CLUSTER command, the namespace name is not part\n>> of the emitted message.\n>>\n>> Using `vacuumdb` CLI tool recently with multiple jobs, I found that\n>> reading the output messages harder to match the relations with their\n>> namespaces.\n>>\n>> Example:\n>>\n>> INFO: vacuuming \"sendgrid.open\"\n>> INFO: vacuuming \"mailgun.open\"\n>> ...\n>> INFO: \"open\": found 0 removable, 31460776 nonremovable row versions in\n>> 1358656 pages\n>> DETAIL: 0 dead row versions cannot be removed yet.\n>> CPU 31.35s/261.26u sec elapsed 1620.68 sec.\n>> ...\n>>\n>> In this example. 
the user can't readily tell which `open` relation was\n>> completed.\n>>\n>> Attached is a patch using existing functions to include the namespace in\n>> the output string.\n>>\n>> Looking forward to feedback!\n>> -Mike Fiedler\n>>\n>\n> I've added this to the open commitfest:\n> https://commitfest.postgresql.org/33/3200/\n>\n> The change is quite simple, just 3 lines, adding the schema name to two\n> different lines of output.\n>\n> As such, there is no obvious documentation to change, though I can imagine\n> that we have sample output from vacuum, vacuumdb or cluster somewhere that\n> would need to be updated.\n>\n> I cobbled together a very simple test:\n>\n> ~/pgdata$ /usr/local/pgsql/bin/psql postgres\n> psql (14beta2)\n> Type \"help\" for help.\n> postgres=# create database mike_test;\n> CREATE DATABASE\n> postgres=# \\c mike_test\n> You are now connected to database \"mike_test\" as user \"corey\".\n> mike_test=# create schema foo;\n> CREATE SCHEMA\n> mike_test=# create table foo.bar(x integer);\n> CREATE TABLE\n> mike_test=# \\q\n> mike_test=# VACUUM FULL VERBOSE foo.bar;\n> INFO: vacuuming \"foo.bar\"\n> INFO: \"foo.bar\": found 0 removable, 0 nonremovable row versions in 0 pages\n> DETAIL: 0 dead row versions cannot be removed yet.\n> CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s.\n> VACUUM\n>\n>\n> And of course vacuumdb\n>\n> ~/pgdata$ /usr/local/pgsql/bin/vacuumdb --full --verbose mike_test\n> --table=foo.bar\n> vacuumdb: vacuuming database \"mike_test\"\n> INFO: vacuuming \"foo.bar\"\n> INFO: \"foo.bar\": found 0 removable, 0 nonremovable row versions in 0 pages\n> DETAIL: 0 dead row versions cannot be removed yet.\n> CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s.\n>\n>\n> So far, so good.\n>\n\nAwesome, thanks! 
Are there any other steps I should take?On Wed, Jun 23, 2021 at 5:46 PM Corey Huinker <corey.huinker@gmail.com> wrote:On Tue, Jun 22, 2021 at 6:08 PM Mike <miketheman@gmail.com> wrote:When running a VACUUM or CLUSTER command, the namespace name is not part of the emitted message.Using `vacuumdb` CLI tool recently with multiple jobs, I found that reading the output messages harder to match the relations with their namespaces.Example:INFO:  vacuuming \"sendgrid.open\"INFO:  vacuuming \"mailgun.open\"...INFO:  \"open\": found 0 removable, 31460776 nonremovable row versions in 1358656 pagesDETAIL:  0 dead row versions cannot be removed yet.CPU 31.35s/261.26u sec elapsed 1620.68 sec....In this example. the user can't readily tell which `open` relation was completed.Attached is a patch using existing functions to include the namespace in the output string.Looking forward to feedback!-Mike FiedlerI've added this to the open commitfest: https://commitfest.postgresql.org/33/3200/The change is quite simple, just 3 lines, adding the schema name to two different lines of output.As such, there is no obvious documentation to change, though I can imagine that we have sample output from vacuum, vacuumdb or cluster somewhere that would need to be updated.I cobbled together a very simple test:~/pgdata$ /usr/local/pgsql/bin/psql postgrespsql (14beta2)Type \"help\" for help.postgres=# create database mike_test;CREATE DATABASEpostgres=# \\c mike_test You are now connected to database \"mike_test\" as user \"corey\".mike_test=# create schema foo;CREATE SCHEMAmike_test=# create table foo.bar(x integer);CREATE TABLEmike_test=# \\qmike_test=# VACUUM FULL VERBOSE foo.bar;INFO:  vacuuming \"foo.bar\"INFO:  \"foo.bar\": found 0 removable, 0 nonremovable row versions in 0 pagesDETAIL:  0 dead row versions cannot be removed yet.CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s.VACUUMAnd of course vacuumdb~/pgdata$ /usr/local/pgsql/bin/vacuumdb --full --verbose mike_test 
--table=foo.barvacuumdb: vacuuming database \"mike_test\"INFO:  vacuuming \"foo.bar\"INFO:  \"foo.bar\": found 0 removable, 0 nonremovable row versions in 0 pagesDETAIL:  0 dead row versions cannot be removed yet.CPU: user: 0.00 s, system: 0.00 s, elapsed: 0.00 s. So far, so good.", "msg_date": "Fri, 25 Jun 2021 11:10:47 -0400", "msg_from": "Mike <miketheman@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Emit namespace in post-copy output" }, { "msg_contents": "On Fri, Jun 25, 2021, at 12:10 PM, Mike wrote:\n> Awesome, thanks! Are there any other steps I should take?\nNo. Keep an eye on this thread. If you modify this patch, check if PostgreSQL\nPatch Tester [1] reports failure. Since your patch does not modify a\nconsiderable amount of code, it probably won't conflict with another patch. If\nso, a reviewer will say so. If your patch doesn't have objections, it will\neventually be committed. BTW, your patch looks good to me.\n\n\n[1] http://cfbot.cputube.org/index.html\n\n\n--\nEuler Taveira\nEDB https://www.enterprisedb.com/\n\nOn Fri, Jun 25, 2021, at 12:10 PM, Mike wrote:Awesome, thanks! Are there any other steps I should take?No. Keep an eye on this thread. If you modify this patch, check if PostgreSQLPatch Tester [1] reports failure. Since your patch does not modify aconsiderable amount of code, it probably won't conflict with another patch. Ifso, a reviewer will say so. If your patch doesn't have objections, it willeventually be committed. 
BTW, your patch looks good to me.[1] http://cfbot.cputube.org/index.html--Euler TaveiraEDB   https://www.enterprisedb.com/", "msg_date": "Fri, 25 Jun 2021 16:56:30 -0300", "msg_from": "\"Euler Taveira\" <euler@eulerto.com>", "msg_from_op": false, "msg_subject": "Re: Emit namespace in post-copy output" }, { "msg_contents": "The following review has been posted through the commitfest application:\nmake installcheck-world: tested, passed\nImplements feature: tested, passed\nSpec compliant: not tested\nDocumentation: not tested\n\nPassed make check-world. Running make installcheck-world had 2 errors out of 209, but I got those same 2 errors on a clean branch.\r\n\r\nFeature is as-described, and very simple.\r\n\r\nAs far as I can tell, there is no external specification for vacuum or any related utility.\r\n\r\nI searched the documentation, and found several examples of the invocation of the VACUUM FULL command and vacuuumdb utility, but at no point was sample output shown, so this change will not require updating documentation.\n\nThe new status of this patch is: Ready for Committer\n", "msg_date": "Tue, 27 Jul 2021 06:01:13 +0000", "msg_from": "Corey Huinker <corey.huinker@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Fwd: Emit namespace in post-copy output" }, { "msg_contents": "I took a look at this I agree with the reviewer that it's a good change. The\noutput from multiple jobs in vacuumdb is clearly easier to parse with this\nsince the initial LOG and later DETAIL can be interleaved with other relations\nof the same name in other namespaces.\n\n+\tget_namespace_name(RelationGetNamespace(OldHeap)),\n\nSince get_namespace_name() returns a palloced string, this will lead to a 2x\nleak of the namespace length as opposed to the 1x of today. While hardly a big\ndeal, it seems prudent to cap this by storing the returned string locally now\nthat we need it twice.\n\nI've updated the patch with this, see the attached v2. 
Barring objections I\nwill go ahead with this.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/", "msg_date": "Wed, 28 Jul 2021 16:15:19 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Emit namespace in post-copy output" }, { "msg_contents": "On Wed, Jul 28, 2021 at 04:15:19PM +0200, Daniel Gustafsson wrote:\n> Since get_namespace_name() returns a palloced string, this will lead to a 2x\n> leak of the namespace length as opposed to the 1x of today. While hardly a big\n> deal, it seems prudent to cap this by storing the returned string locally now\n> that we need it twice.\n\nI don't think this matters much. A quick read of the code shows that\nthis memory should be allocated within the transaction context running\nCLUSTER/VACUUM FULL, and that gets free'd in the internal calls of\nCommitTransactionCommand().\n--\nMichael", "msg_date": "Thu, 29 Jul 2021 09:57:30 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Emit namespace in post-copy output" }, { "msg_contents": "> On 28 Jul 2021, at 16:15, Daniel Gustafsson <daniel@yesql.se> wrote:\n\n> I took a look at this I agree with the reviewer that it's a good change.\n\nPushed to master, thanks!\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Mon, 16 Aug 2021 20:13:02 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Emit namespace in post-copy output" } ]
[ { "msg_contents": "(This is split off from my work on OAUTHBEARER [1].)\r\n\r\nCurrently, the SASL logic is tightly coupled to the SCRAM\r\nimplementation. This patch untangles the two, by introducing callback\r\nstructs for both the frontend and backend.\r\n\r\nIn the original thread, Michael Paquier commented:\r\n\r\n> + /* TODO: SASL_EXCHANGE_FAILURE with output is forbidden in SASL */\r\n> if (result == SASL_EXCHANGE_SUCCESS)\r\n> sendAuthRequest(port,\r\n> AUTH_REQ_SASL_FIN,\r\n> output,\r\n> outputlen);\r\n> Perhaps that's an issue we need to worry on its own? I didn't recall\r\n> this part..\r\n\r\nYeah, it was non-obvious to me on the first read too. It's a\r\nconsequence of a couple parts of the SASL spec [2]:\r\n\r\n> The protocol may include an optional additional data field in this\r\n> outcome message. This field can only include additional data when\r\n> the outcome is successful.\r\n\r\nand\r\n\r\n> SASL mechanism specifications MUST supply the following information:\r\n> \r\n> [...]\r\n> \r\n> b) An indication of whether the server is expected to provide\r\n> additional data when indicating a successful outcome. If so,\r\n> if the server sends the additional data as a challenge, the\r\n> specification MUST indicate that the response to this challenge\r\n> is an empty response.\r\n\r\nThere isn't a corresponding provision for data for a *failed* outcome,\r\nso any such data would have to be sent as a separate, mechanism-\r\nspecific, challenge. This is what OAUTHBEARER has to do, with an\r\nannoying \"failure handshake\".\r\n\r\n(Note that our protocol implementation provides an \"additional data\"\r\nfield for the initial client response, but *not* for the authentication\r\noutcome. 
That seems odd to me, but it is what it is, I suppose.)\r\n\r\nRegarding that specific TODO -- I think it'd be good for the framework\r\nto fail hard if a mechanism tries to send data during a failure\r\noutcome, as it probably means the mechanism isn't implemented to spec.\r\n\r\n--Jacob\r\n\r\n[1] https://www.postgresql.org/message-id/d1b467a78e0e36ed85a09adf979d04cf124a9d4b.camel@vmware.com\r\n[2] https://datatracker.ietf.org/doc/html/rfc4422", "msg_date": "Tue, 22 Jun 2021 22:37:29 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "[PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Tue, Jun 22, 2021 at 10:37:29PM +0000, Jacob Champion wrote:\n> Currently, the SASL logic is tightly coupled to the SCRAM\n> implementation. This patch untangles the two, by introducing callback\n> structs for both the frontend and backend.\n\nThe approach to define and have a set callbacks feels natural.\n\n+/* Status codes for message exchange */\n+#define SASL_EXCHANGE_CONTINUE 0\n+#define SASL_EXCHANGE_SUCCESS 1\n+#define SASL_EXCHANGE_FAILURE 2\n\nIt may be better to prefix those with PG_ as they begin to be\npublished.\n\n+/* Backend mechanism API */\n+typedef void (*pg_be_sasl_mechanism_func)(Port *, StringInfo);\n+typedef void *(*pg_be_sasl_init_func)(Port *, const char *, const\nchar *);\n+typedef int (*pg_be_sasl_exchange_func)(void *, const char *, int,\nchar **, int *, char **);\n+\n+typedef struct\n+{\n+ pg_be_sasl_mechanism_func get_mechanisms;\n+ pg_be_sasl_init_func init;\n+ pg_be_sasl_exchange_func exchange;\n+} pg_be_sasl_mech;\n\nAll this is going to require much more documentation to explain what\nis the role of those callbacks and what they are here for.\n\nAnother thing that is not tackled by this patch is the format of the\nmessages exchanged which is something only in SCRAM now. 
Perhaps it\nwould be better to extract the small-ish routines currently in\nfe-auth-scram.c and auth-scram.c that we use to grab values associated\nto an attribute in an exchange message and put them in a central place\nlike an auth-sasl.c and fe-auth-sasl.c. This move could also make\nsense for the exising init and continue routines for SASL in\nfe-auth.c.\n\n+static int\n+CheckSCRAMAuth(Port *port, char *shadow_pass, char **logdetail)\n+{\n+ return SASL_exchange(&pg_be_scram_mech, port, shadow_pass, logdetail);\n+}\nIt may be cleaner to live without this thin wrapper. It is a bit\nstrange to have a SCRAM API in a file where we want mostly SASL things\n(Okay, uaScram does not count as this is assigned after the HBA\nlookup). Moving any SASL-related things into a separate file may be a\ncleaner option, especially considering that we have a bit more than\nthe exchange itself, like message handling.\n\n+typedef void *(*pg_sasl_init_func)(PGconn *, const char *, const char\n*);\n+typedef void (*pg_sasl_exchange_func)(void *, char *, int, char **,\nint *, bool *, bool *);\n+typedef bool (*pg_sasl_channel_bound_func)(void *);\n+typedef void (*pg_sasl_free_func)(void *);\n+\n+typedef struct\n+{\n+ pg_sasl_init_func init;\n+ pg_sasl_exchange_func exchange;\n+ pg_sasl_channel_bound_func channel_bound;\n+ pg_sasl_free_func free;\n+} pg_sasl_mech;\nThese would be better into a separate header, with more\ndocumentation. It may be more consistent with the backend to name\nthat pg_fe_sasl_mech?\n\nIt looks like there is enough material for a callback able to handle\nchannel binding. In the main patch for OAUTHBEARER, I can see for\nexample that the handling of OAUTHBEARER-PLUS copied from its SCRAM\nsibling. That does not need to be tackled in the same patch. Just\nnoting it on the way.\n\n> (Note that our protocol implementation provides an \"additional data\"\n> field for the initial client response, but *not* for the authentication\n> outcome. 
That seems odd to me, but it is what it is, I suppose.)\n\nYou are referring to the protocol implementation as of\nAuthenticationSASLFinal, right?\n\n> Regarding that specific TODO -- I think it'd be good for the framework\n> to fail hard if a mechanism tries to send data during a failure\n> outcome, as it probably means the mechanism isn't implemented to spec.\n\nAgreed. That would mean patching libpq to add more safeguards in\npg_SASL_continue() if I am following correctly.\n--\nMichael", "msg_date": "Wed, 23 Jun 2021 16:38:46 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Wed, 2021-06-23 at 16:38 +0900, Michael Paquier wrote:\r\n> On Tue, Jun 22, 2021 at 10:37:29PM +0000, Jacob Champion wrote:\r\n> > Currently, the SASL logic is tightly coupled to the SCRAM\r\n> > implementation. This patch untangles the two, by introducing callback\r\n> > structs for both the frontend and backend.\r\n> \r\n> The approach to define and have a set callbacks feels natural.\r\n\r\nGood, thanks!\r\n\r\n> +/* Status codes for message exchange */\r\n> +#define SASL_EXCHANGE_CONTINUE 0\r\n> +#define SASL_EXCHANGE_SUCCESS 1\r\n> +#define SASL_EXCHANGE_FAILURE 2\r\n> \r\n> It may be better to prefix those with PG_ as they begin to be\r\n> published.\r\n\r\nAdded in v2.\r\n\r\n> +/* Backend mechanism API */\r\n> +typedef void (*pg_be_sasl_mechanism_func)(Port *, StringInfo);\r\n> +typedef void *(*pg_be_sasl_init_func)(Port *, const char *, const\r\n> char *);\r\n> +typedef int (*pg_be_sasl_exchange_func)(void *, const char *, int,\r\n> char **, int *, char **);\r\n> +\r\n> +typedef struct\r\n> +{\r\n> + pg_be_sasl_mechanism_func get_mechanisms;\r\n> + pg_be_sasl_init_func init;\r\n> + pg_be_sasl_exchange_func exchange;\r\n> +} pg_be_sasl_mech;\r\n> \r\n> All this is going to require much more documentation to explain what\r\n> is the role of those callbacks 
and what they are here for.\r\n\r\nYes, definitely. If the current approach seems generally workable, I'll\r\nget started on that.\r\n\r\n> Another thing that is not tackled by this patch is the format of the\r\n> messages exchanged which is something only in SCRAM now. Perhaps it\r\n> would be better to extract the small-ish routines currently in\r\n> fe-auth-scram.c and auth-scram.c that we use to grab values associated\r\n> to an attribute in an exchange message and put them in a central place\r\n> like an auth-sasl.c and fe-auth-sasl.c. This move could also make\r\n> sense for the exising init and continue routines for SASL in\r\n> fe-auth.c.\r\n\r\nWe can. I recommend waiting for another GS2 mechanism implementation,\r\nthough.\r\n\r\nThe attribute/value encoding is not part of core SASL (see [1] for that\r\nRFC), and OAUTHBEARER is not technically a GS2 mechanism -- though it\r\nmakes use of a vestigal GS2 header block, apparently in the hopes that\r\none day it might become one. So we could pull out the similarities now,\r\nbut I'd hate to extract the wrong abstractions and make someone else\r\nuntangle it later.\r\n\r\n> +static int\r\n> +CheckSCRAMAuth(Port *port, char *shadow_pass, char **logdetail)\r\n> +{\r\n> + return SASL_exchange(&pg_be_scram_mech, port, shadow_pass, logdetail);\r\n> +}\r\n> It may be cleaner to live without this thin wrapper. It is a bit\r\n> strange to have a SCRAM API in a file where we want mostly SASL things\r\n> (Okay, uaScram does not count as this is assigned after the HBA\r\n> lookup). Moving any SASL-related things into a separate file may be a\r\n> cleaner option, especially considering that we have a bit more than\r\n> the exchange itself, like message handling.\r\n\r\nHeh, I figured that at ~3500 lines, you all just really wanted the\r\nCheck* implementations to live in auth.c. :D\r\n\r\nI can definitely move it (into, say, auth-sasl.c?). 
I'll probably do\r\nthat in a second commit, though, since keeping it in place during the\r\nrefactor makes the review easier IMO.\r\n\r\n> +typedef void *(*pg_sasl_init_func)(PGconn *, const char *, const char\r\n> *);\r\n> +typedef void (*pg_sasl_exchange_func)(void *, char *, int, char **,\r\n> int *, bool *, bool *);\r\n> +typedef bool (*pg_sasl_channel_bound_func)(void *);\r\n> +typedef void (*pg_sasl_free_func)(void *);\r\n> +\r\n> +typedef struct\r\n> +{\r\n> + pg_sasl_init_func init;\r\n> + pg_sasl_exchange_func exchange;\r\n> + pg_sasl_channel_bound_func channel_bound;\r\n> + pg_sasl_free_func free;\r\n> +} pg_sasl_mech;\r\n> These would be better into a separate header, with more\r\n> documentation.\r\n\r\nCan do. Does libpq-int-sasl.h work as a filename? This should not be\r\nexported to applications.\r\n\r\n> It may be more consistent with the backend to name\r\n> that pg_fe_sasl_mech?\r\n\r\nDone in v2.\r\n\r\n> It looks like there is enough material for a callback able to handle\r\n> channel binding. In the main patch for OAUTHBEARER, I can see for\r\n> example that the handling of OAUTHBEARER-PLUS copied from its SCRAM\r\n> sibling. That does not need to be tackled in the same patch. Just\r\n> noting it on the way.\r\n\r\nOAUTHBEARER doesn't support channel binding -- there's no OAUTHBEARER-\r\nPLUS, and there probably won't ever be, given the mechanism's\r\nsimplicity -- so I'd recommend that this wait for a second GS2\r\nmechanism implementation, as well.\r\n\r\n> > (Note that our protocol implementation provides an \"additional data\"\r\n> > field for the initial client response, but *not* for the authentication\r\n> > outcome. That seems odd to me, but it is what it is, I suppose.)\r\n> \r\n> You are referring to the protocol implementation as of\r\n> AuthenticationSASLFinal, right?\r\n\r\nYes, but I misremembered. 
My statement was wrong -- we do allow for\r\nadditional data in the authentication outcome from the server.\r\n\r\nFor AuthenticationSASLFinal, we don't distinguish between \"no\r\nadditional data\" and \"additional data of length zero\", which IIRC is a\r\nviolation of the SASL protocol. That may cause problems with a\r\ntheoretical future mechanism implementation, but I don't think it\r\naffects SCRAM. I believe we *do* distinguish between those cases\r\ncorrectly for the initial client response packet.\r\n\r\nSorry for the confusion; let me double-check again when I have fresh\r\neyes at the start of the week, before sending you on a goose chase.\r\n\r\n> > Regarding that specific TODO -- I think it'd be good for the framework\r\n> > to fail hard if a mechanism tries to send data during a failure\r\n> > outcome, as it probably means the mechanism isn't implemented to spec.\r\n> \r\n> Agreed. That would mean patching libpq to add more safeguards in\r\n> pg_SASL_continue() if I am following correctly.\r\n\r\nRight.\r\n\r\nThanks for the review!\r\n--Jacob\r\n\r\n[1] https://datatracker.ietf.org/doc/html/rfc5801", "msg_date": "Fri, 25 Jun 2021 23:40:33 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Fri, Jun 25, 2021 at 11:40:33PM +0000, Jacob Champion wrote:\n> I can definitely move it (into, say, auth-sasl.c?). I'll probably do\n> that in a second commit, though, since keeping it in place during the\n> refactor makes the review easier IMO.\n\nauth-sasl.c is a name consistent with the existing practice.\n\n> Can do. Does libpq-int-sasl.h work as a filename? This should not be\n> exported to applications.\n\nI would still with the existing naming used by fe-gssapi-common.h, so\nthat would be fe-auth-sasl.c and fe-auth-sasl.h, with the header\nremaining internal. 
Not strongly wedded to this name, of course, that\njust seems consistent.\n--\nMichael", "msg_date": "Sat, 26 Jun 2021 09:47:02 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Sat, 2021-06-26 at 09:47 +0900, Michael Paquier wrote:\r\n> On Fri, Jun 25, 2021 at 11:40:33PM +0000, Jacob Champion wrote:\r\n> > I can definitely move it (into, say, auth-sasl.c?). I'll probably do\r\n> > that in a second commit, though, since keeping it in place during the\r\n> > refactor makes the review easier IMO.\r\n> \r\n> auth-sasl.c is a name consistent with the existing practice.\r\n> \r\n> > Can do. Does libpq-int-sasl.h work as a filename? This should not be\r\n> > exported to applications.\r\n> \r\n> I would still with the existing naming used by fe-gssapi-common.h, so\r\n> that would be fe-auth-sasl.c and fe-auth-sasl.h, with the header\r\n> remaining internal. Not strongly wedded to this name, of course, that\r\n> just seems consistent.\r\n\r\nDone in v3, with a second patch for the code motion.\r\n\r\nI added a first pass at API documentation as well. 
This exposed some\r\nadditional front-end TODOs that I added inline, but they should\r\nprobably be dealt with independently of the refactor:\r\n\r\n- Zero-length client responses are legal in the SASL framework;\r\ncurrently we use zero as a sentinel for \"don't send a response\".\r\n\r\n- I don't think it's legal for a client to refuse a challenge from the\r\nserver without aborting the exchange, so we should probably check to\r\nmake sure that client responses are non-NULL in the success case.\r\n\r\n--Jacob", "msg_date": "Wed, 30 Jun 2021 22:30:12 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Wed, Jun 30, 2021 at 10:30:12PM +0000, Jacob Champion wrote:\n> Done in v3, with a second patch for the code motion.\n\nI have gone through that, tweaking the documentation you have added as\nthat's the meat of the patch, reworking a bit the declarations of the\ncallbacks (no need for several typedef gere) and doing some small\nformat changes to make the indentation happy. And that looks pretty\ngood. It is a bit sad that the SCRAM part cannot be completely\nunplugged from the auth part, because of the call to the free function\nand the HBA checks, but adding more wrappers to accomodate with that\nis not really worth it. So I'd like to apply that to clarify this\ncode layer, without the TODOs.\n\n- pg_be_scram_get_mechanisms(port, &sasl_mechs);\n- /* Put another '\\0' to mark that list is finished. */\n- appendStringInfoChar(&sasl_mechs, '\\0');\nI was wondering for a couple of seconds if it would not be better to\nlet the last '\\0' being set within the callback, but what you have\nhere looks better.\n\n- if (!pg_fe_scram_channel_bound(conn->sasl_state))\n+ if (!conn->sasl || !conn->sasl->channel_bound(conn->sasl_state))\nconn->sasl should be set in this code path. 
This style is safer.\n\nThe top comment of scram_init() still mentioned\npg_be_scram_get_mechanisms(), while it should be\nscram_get_mechanisms().\n\nPG_MAX_SASL_MESSAGE_LENGTH can stay within auth-sasl.c.\n\n> I added a first pass at API documentation as well. This exposed some\n> additional front-end TODOs that I added inline, but they should\n> probably be dealt with independently of the refactor:\n> \n> - Zero-length client responses are legal in the SASL framework;\n> currently we use zero as a sentinel for \"don't send a response\".\n\nCheck.\n\n> - I don't think it's legal for a client to refuse a challenge from the\n> server without aborting the exchange, so we should probably check to\n> make sure that client responses are non-NULL in the success case.\n\nHmm. Does the RFCs tell us anything about that?\n--\nMichael", "msg_date": "Mon, 5 Jul 2021 17:17:38 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Mon, 2021-07-05 at 17:17 +0900, Michael Paquier wrote:\r\n> On Wed, Jun 30, 2021 at 10:30:12PM +0000, Jacob Champion wrote:\r\n> > Done in v3, with a second patch for the code motion.\r\n> \r\n> I have gone through that, tweaking the documentation you have added as\r\n> that's the meat of the patch, reworking a bit the declarations of the\r\n> callbacks (no need for several typedef gere) and doing some small\r\n> format changes to make the indentation happy. And that looks pretty\r\n> good.\r\n\r\nLooks very good, thanks! A few comments on the docs changes:\r\n\r\n> +\t * Output parameters:\r\n> +\t *\r\n> +\t *\tbuf: A StringInfo buffer that the callback should populate with\r\n> +\t *\t\t supported mechanism names. The names are appended into this\r\n> +\t *\t\t StringInfo, separated by '\\0' bytes.\r\n\r\nEach name must be null-terminated, not just null-separated. 
That way\r\nthe list of names ends with an empty string:\r\n\r\n name-one\\0 <- added by the mechanism\r\n name-two\\0 <- added by the mechanism\r\n \\0 <- added by the framework\r\n\r\nThe way it's worded now, I could see some implementers failing to\r\nterminate the final name because the framework adds a trailing null\r\nalready -- but the framework is terminating the list, not the final\r\nname.\r\n\r\n> +\t * init()\r\n> +\t *\r\n> +\t * Initializes mechanism-specific state for a connection. This\r\n> +\t * callback must return a pointer to its allocated state, which will\r\n> +\t * be passed as-is as the first argument to the other callbacks.\r\n> +\t * free() is called to release any state resources.\r\n\r\nMaybe say \"The free() callback is called\" to differentiate it from\r\nstandard free()?\r\n\r\n> It is a bit sad that the SCRAM part cannot be completely\r\n> unplugged from the auth part, because of the call to the free function\r\n> and the HBA checks, but adding more wrappers to accomodate with that\r\n> is not really worth it.\r\n\r\nYeah. I think that additional improvements/refactoring here will come\r\nnaturally if clients are ever allowed to negotiate SASL mechanisms in\r\nthe future. Doesn't need to happen now.\r\n\r\n> - if (!pg_fe_scram_channel_bound(conn->sasl_state))\r\n> + if (!conn->sasl || !conn->sasl->channel_bound(conn->sasl_state))\r\n> conn->sasl should be set in this code path. This style is safer.\r\n\r\nIt's possible for conn->sasl to be NULL here, say if the client has\r\nchannel_binding=require but connects as a user with an MD5 secret. 
The\r\nSCRAM TAP tests have one such case.\r\n\r\n> The top comment of scram_init() still mentioned\r\n> pg_be_scram_get_mechanisms(), while it should be\r\n> scram_get_mechanisms().\r\n> \r\n> PG_MAX_SASL_MESSAGE_LENGTH can stay within auth-sasl.c.\r\n\r\nLooks good to me.\r\n\r\n> > - I don't think it's legal for a client to refuse a challenge from the\r\n> > server without aborting the exchange, so we should probably check to\r\n> > make sure that client responses are non-NULL in the success case.\r\n> \r\n> Hmm. Does the RFCs tell us anything about that?\r\n\r\nJust in general terms:\r\n\r\n> Each authentication exchange consists of a message from the client to\r\n> the server requesting authentication via a particular mechanism,\r\n> followed by one or more pairs of challenges from the server and\r\n> responses from the client, followed by a message from the server\r\n> indicating the outcome of the authentication exchange. (Note:\r\n> exchanges may also be aborted as discussed in Section 3.5.)\r\n\r\nSo a challenge must be met with a response, or the exchange must be\r\naborted. (And I don't think our protocol implementation provides a\r\nclient abort message; if something goes wrong, we just tear down the\r\nconnection.)\r\n\r\nThanks,\r\n--Jacob\r\n", "msg_date": "Tue, 6 Jul 2021 18:20:49 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Tue, Jul 06, 2021 at 06:20:49PM +0000, Jacob Champion wrote:\n> On Mon, 2021-07-05 at 17:17 +0900, Michael Paquier wrote:\n> Each name must be null-terminated, not just null-separated. 
That way\n> the list of names ends with an empty string:\n> \n> name-one\\0 <- added by the mechanism\n> name-two\\0 <- added by the mechanism\n> \\0 <- added by the framework\n> \n> The way it's worded now, I could see some implementers failing to\n> terminate the final name because the framework adds a trailing null\n> already -- but the framework is terminating the list, not the final\n> name.\n\nGood point. I have used ending with '\\0' bytes instead.\n\n>> +\t * init()\n>> +\t *\n>> +\t * Initializes mechanism-specific state for a connection. This\n>> +\t * callback must return a pointer to its allocated state, which will\n>> +\t * be passed as-is as the first argument to the other callbacks.\n>> +\t * free() is called to release any state resources.\n> \n> Maybe say \"The free() callback is called\" to differentiate it from\n> standard free()?\n\nYes, that could be confusing. Switched to your wording instead.\n\n> It's possible for conn->sasl to be NULL here, say if the client has\n> channel_binding=require but connects as a user with an MD5 secret. The\n> SCRAM TAP tests have one such case.\n\nIndeed.\n\n>> Hmm. Does the RFCs tell us anything about that?\n> \n> Just in general terms:\n> \n>> Each authentication exchange consists of a message from the client to\n>> the server requesting authentication via a particular mechanism,\n>> followed by one or more pairs of challenges from the server and\n>> responses from the client, followed by a message from the server\n>> indicating the outcome of the authentication exchange. (Note:\n>> exchanges may also be aborted as discussed in Section 3.5.)\n> \n> So a challenge must be met with a response, or the exchange must be\n> aborted. (And I don't think our protocol implementation provides a\n> client abort message; if something goes wrong, we just tear down the\n> connection.)\n\nThanks. At the same time, section 3.5 also says that the client may\nsend a message to abort. 
So one can interpret that the client has\nalso the choice to abort without sending a response back to the\nserver? Or I am just interpreting incorrectly the use of \"may\" in\nthis context?\n--\nMichael", "msg_date": "Wed, 7 Jul 2021 14:08:20 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Wed, 2021-07-07 at 14:08 +0900, Michael Paquier wrote:\r\n> On Tue, Jul 06, 2021 at 06:20:49PM +0000, Jacob Champion wrote:\r\n> > On Mon, 2021-07-05 at 17:17 +0900, Michael Paquier wrote:\r\n> > \r\n> > > Hmm. Does the RFCs tell us anything about that?\r\n> > \r\n> > Just in general terms:\r\n> > \r\n> > > Each authentication exchange consists of a message from the client to\r\n> > > the server requesting authentication via a particular mechanism,\r\n> > > followed by one or more pairs of challenges from the server and\r\n> > > responses from the client, followed by a message from the server\r\n> > > indicating the outcome of the authentication exchange. (Note:\r\n> > > exchanges may also be aborted as discussed in Section 3.5.)\r\n> > \r\n> > So a challenge must be met with a response, or the exchange must be\r\n> > aborted. (And I don't think our protocol implementation provides a\r\n> > client abort message; if something goes wrong, we just tear down the\r\n> > connection.)\r\n> \r\n> Thanks. At the same time, section 3.5 also says that the client may\r\n> send a message to abort. So one can interpret that the client has\r\n> also the choice to abort without sending a response back to the\r\n> server? Or I am just interpreting incorrectly the use of \"may\" in\r\n> this context?\r\n\r\nThat's correct. But the client may not simply ignore the challenge and\r\nkeep the exchange open waiting for a new one, as pg_SASL_continue()\r\ncurrently allows. 
That's what my TODO is referring to.\r\n\r\n--Jacob\r\n\r\n", "msg_date": "Wed, 7 Jul 2021 15:07:14 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Wed, Jul 07, 2021 at 03:07:14PM +0000, Jacob Champion wrote:\n> That's correct. But the client may not simply ignore the challenge and\n> keep the exchange open waiting for a new one, as pg_SASL_continue()\n> currently allows. That's what my TODO is referring to.\n\nI have been looking more at your three points from upthread and\nfeasted on the SASL RFC, as of:\n- Detection that no output is generated on PG_SASL_EXCHANGE_FAILURE\nfor the backend.\n- Handling of zero-length messages in the frontend. The backend\nhandles that already, and SCRAM would complain if sending such\nmessages, but I can see why you'd want to allow that for other\nmechanisms.\n- Making sure that a mechanism generates a message in the middle of\nthe exchange in the frontend.\n\nI agree that this looks like an improvement in terms of the\nexpectations behind a SASL mechanism, so I have done the attached to\nstrengthen a bit all those checks. However, I don't really see a\npoint in back-patching any of that, as SCRAM satisfies with its\nimplementation already all those conditions AFAIK. So that's an\nimprovement of the current code, and it fits nicely with the SASL\nrefactoring for the documentation of the callbacks.\n\nThoughts?\n--\nMichael", "msg_date": "Thu, 8 Jul 2021 16:27:19 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Thu, 2021-07-08 at 16:27 +0900, Michael Paquier wrote:\r\n> I agree that this looks like an improvement in terms of the\r\n> expectations behind a SASL mechanism, so I have done the attached to\r\n> strengthen a bit all those checks. 
However, I don't really see a\r\n> point in back-patching any of that, as SCRAM satisfies with its\r\n> implementation already all those conditions AFAIK.\r\n\r\nAgreed.\r\n\r\n> Thoughts?\r\n\r\nLGTM, thanks!\r\n\r\n> +\t *\toutputlen: The length (0 or higher) of the client response buffer,\r\n> +\t *\t\t\t invalid if output is NULL.\r\n\r\nnitpick: maybe \"ignored\" instead of \"invalid\"?\r\n\r\n--Jacob\r\n", "msg_date": "Fri, 9 Jul 2021 23:31:48 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Fri, Jul 09, 2021 at 11:31:48PM +0000, Jacob Champion wrote:\n> On Thu, 2021-07-08 at 16:27 +0900, Michael Paquier wrote:\n>> +\t *\toutputlen: The length (0 or higher) of the client response buffer,\n>> +\t *\t\t\t invalid if output is NULL.\n> \n> nitpick: maybe \"ignored\" instead of \"invalid\"?\n\nThanks, applied as 44bd012 after using your suggestion.\n\nAnother thing I noticed after more review is that the check in\nfe-auth.c to make sure that a message needs to be generated if the\nexchange is not completed yet has no need to depend on \"success\", only\n\"done\".\n--\nMichael", "msg_date": "Sun, 11 Jul 2021 13:16:05 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Sun, 2021-07-11 at 13:16 +0900, Michael Paquier wrote:\r\n> On Fri, Jul 09, 2021 at 11:31:48PM +0000, Jacob Champion wrote:\r\n> > On Thu, 2021-07-08 at 16:27 +0900, Michael Paquier wrote:\r\n> > > +\t *\toutputlen: The length (0 or higher) of the client response buffer,\r\n> > > +\t *\t\t\t invalid if output is NULL.\r\n> > \r\n> > nitpick: maybe \"ignored\" instead of \"invalid\"?\r\n> \r\n> Thanks, applied as 44bd012 after using your suggestion.\r\n\r\nThanks!\r\n\r\n> Another thing I noticed after more review is that the check in\r\n> fe-auth.c to 
make sure that a message needs to be generated if the\r\n> exchange is not completed yet has no need to depend on \"success\", only\r\n> \"done\".\r\n\r\nAh, right. I think the (!done && !success) case is probably indicative\r\nof an API smell, but that's probably something to clean up in a future\r\npass.\r\n\r\n--Jacob\r\n", "msg_date": "Tue, 13 Jul 2021 00:01:46 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Tue, Jul 13, 2021 at 12:01:46AM +0000, Jacob Champion wrote:\n> Ah, right. I think the (!done && !success) case is probably indicative\n> of an API smell, but that's probably something to clean up in a future\n> pass.\n\nYeah, agreed. I feel that it would should be cleaner to replace those\ntwo booleans with a status enum or a bitmask.\n--\nMichael", "msg_date": "Tue, 13 Jul 2021 09:47:15 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "Hello, hackers!\n\nI got an error while building one of the extensions.\n/home/mkulagin/pg-install/postgresql-master/include/internal/libpq-int.h:44:10: fatal error: fe-auth-sasl.h: No such file or directory\n #include \"fe-auth-sasl.h\"\n ^~~~~~~~~~~~~~~~ \n\nI think the new fe-auth-sasl.h file should be installed too.\nCorrection proposal in the attached file (but I'm not sure that fix of Install.pm is correct).\n\nRegards, Mikhail A. 
Kulagin\nPostgresPro", "msg_date": "Tue, 13 Jul 2021 12:41:27 +0300", "msg_from": "\"Mikhail Kulagin\" <m.kulagin@postgrespro.ru>", "msg_from_op": false, "msg_subject": "RE: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Tue, Jul 13, 2021 at 12:41:27PM +0300, Mikhail Kulagin wrote:\n> I got an error while building one of the extensions.\n> /home/mkulagin/pg-install/postgresql-master/include/internal/libpq-int.h:44:10: fatal error: fe-auth-sasl.h: No such file or directory\n> #include \"fe-auth-sasl.h\"\n> ^~~~~~~~~~~~~~~~ \n\nRight. I overlooked the fact that libpq-int.h is installed.\n\n> I think the new fe-auth-sasl.h file should be installed too.\n> Correction proposal in the attached file (but I'm not sure that fix\n> of Install.pm is correct). \n\nThat looks correct to me. I'll check that tomorrow.\n--\nMichael", "msg_date": "Tue, 13 Jul 2021 19:31:56 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Tue, 2021-07-13 at 19:31 +0900, Michael Paquier wrote:\r\n> On Tue, Jul 13, 2021 at 12:41:27PM +0300, Mikhail Kulagin wrote:\r\n> > I got an error while building one of the extensions.\r\n> > /home/mkulagin/pg-install/postgresql-master/include/internal/libpq-int.h:44:10: fatal error: fe-auth-sasl.h: No such file or directory\r\n> > #include \"fe-auth-sasl.h\"\r\n> > ^~~~~~~~~~~~~~~~ \r\n> \r\n> Right. I overlooked the fact that libpq-int.h is installed.\r\n\r\nThanks for catching that Mikhail.\r\n\r\n> > I think the new fe-auth-sasl.h file should be installed too.\r\n> > Correction proposal in the attached file (but I'm not sure that fix\r\n> > of Install.pm is correct). \r\n> \r\n> That looks correct to me. I'll check that tomorrow.\r\n\r\nLooks right to me too. 
I'm currently rebuilding my Windows dev\r\nenvironment so I haven't been able to double-check that piece of it.\r\n\r\nJust to make sure -- do we want to export the fe-auth-sasl.h header as\r\nopposed to forward-declaring the pg_fe_sasl_mech struct? Is the use\r\ncase for libpq-int.h just \"here, have at the internals, and if you\r\nbreak it then it's on you\"?\r\n\r\n--Jacob\r\n", "msg_date": "Tue, 13 Jul 2021 22:41:01 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Tue, 2021-07-13 at 22:41 +0000, Jacob Champion wrote:\r\n> On Tue, 2021-07-13 at 19:31 +0900, Michael Paquier wrote:\r\n> > On Tue, Jul 13, 2021 at 12:41:27PM +0300, Mikhail Kulagin wrote:\r\n> > > \r\n> > > I think the new fe-auth-sasl.h file should be installed too.\r\n> > > Correction proposal in the attached file (but I'm not sure that fix\r\n> > > of Install.pm is correct). \r\n> > \r\n> > That looks correct to me. I'll check that tomorrow.\r\n> \r\n> Looks right to me too. 
I'm currently rebuilding my Windows dev\r\n> environment so I haven't been able to double-check that piece of it.\r\n\r\n(Confirmed that this patch works for me on Windows.)\r\n\r\nThanks,\r\n--Jacob\r\n", "msg_date": "Tue, 13 Jul 2021 23:52:10 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" }, { "msg_contents": "On Tue, Jul 13, 2021 at 10:41:01PM +0000, Jacob Champion wrote:\n> Just to make sure -- do we want to export the fe-auth-sasl.h header as\n> opposed to forward-declaring the pg_fe_sasl_mech struct?\n\nInstalling fe-auth-sasl.h has the advantage to make the internals of\nthe callbacks available to applications playing with the internals.\nFor SASL, it makes things easier to define new mechanisms out of\ncore.\n\n> Is the use\n> case for libpq-int.h just \"here, have at the internals, and if you\n> break it then it's on you\"?\n\nYes, it can be useful for applications willing to use the internals of\nlibpq, like in forks. There is no guarantee that this will not break\nacross major version upgrades, so that's up to the user to fix things\nonce they play with the internals.\n--\nMichael", "msg_date": "Wed, 14 Jul 2021 10:42:46 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Pull general SASL framework out of SCRAM" } ]
[ { "msg_contents": "(This is split off from my work on OAUTHBEARER [1].)\r\n\r\nThe jsonapi in src/common can't currently be compiled into libpq. The\r\nfirst patch here removes the dependency on pg_log_fatal(), which is not\r\navailable to the sharedlib. The second patch makes sure that all of the\r\nreturn values from json_errdetail() can be pfree'd, to avoid long-\r\nrunning leaks.\r\n\r\nIn the original thread, Michael Paquier commented:\r\n\r\n> +# define check_stack_depth()\r\n> +# ifdef JSONAPI_NO_LOG\r\n> +# define json_log_and_abort(...) \\\r\n> + do { fprintf(stderr, __VA_ARGS__); exit(1); } while(0)\r\n> +# else\r\n> In patch 0002, this is the wrong approach. libpq will not be able to\r\n> feed on such reports, and you cannot use any of the APIs from the\r\n> palloc() family either as these just fail on OOM. libpq should be\r\n> able to know about the error, and would fill in the error back to the\r\n> application. This abstraction is not necessary on HEAD as\r\n> pg_verifybackup is fine with this level of reporting. My rough guess\r\n> is that we will need to split the existing jsonapi.c into two files,\r\n> one that can be used in shared libraries and a second that handles the \r\n> errors.\r\n\r\nHmm. I'm honestly hesitant to start splitting files apart, mostly\r\nbecause json_log_and_abort() is only called from two places, and both\r\nthose places are triggered by programmer error as opposed to user\r\nerror.\r\n\r\nWould it make more sense to switch to an fprintf-and-abort case, to\r\nmatch the approach taken by PGTHREAD_ERROR and the out-of-memory\r\nconditions in fe-print.c? 
Or is there already precedent for handling\r\ncan't-happen code paths with in-band errors, through the PGconn?\r\n\r\n--Jacob\r\n\r\n[1] https://www.postgresql.org/message-id/d1b467a78e0e36ed85a09adf979d04cf124a9d4b.camel@vmware.com", "msg_date": "Tue, 22 Jun 2021 22:59:37 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "[PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On Tue, Jun 22, 2021 at 10:59:37PM +0000, Jacob Champion wrote:\n> Hmm. I'm honestly hesitant to start splitting files apart, mostly\n> because json_log_and_abort() is only called from two places, and both\n> those places are triggered by programmer error as opposed to user\n> error.\n> \n> Would it make more sense to switch to an fprintf-and-abort case, to\n> match the approach taken by PGTHREAD_ERROR and the out-of-memory\n> conditions in fe-print.c? Or is there already precedent for handling\n> can't-happen code paths with in-band errors, through the PGconn?\n\nNot really..\n\nLooking more closely at that, I actually find a bit crazy the\nrequirement for any logging within jsonapi.c just to cope with the\nfact that json_errdetail() and report_parse_error() just want to track\ndown if the caller is giving some garbage or not, which should never\nbe the case, really. So I would be tempted to eliminate this\ndependency to begin with.\n\nThe second thing is how we should try to handle the way the error\nmessage gets allocated in json_errdetail(). libpq cannot rely on\npsprintf(), so I can think about two options here:\n- Let the caller of json_errdetail() allocate the memory area of the\nerror message by itself, pass it down to the function.\n- Do the allocation within json_errdetail(), and let callers cope the\ncase where json_errdetail() itself fails on OOM for any frontend code\nusing it.\n\nLooking at HEAD, the OAUTH patch and the token handling, the second\noption looks more interesting. 
I have to admit that handling the\ntoken part makes the patch a bit special, but that avoids duplicating\nall those error messages for libpq. Please see the idea as attached.\n--\nMichael", "msg_date": "Thu, 24 Jun 2021 14:56:04 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On Thu, 2021-06-24 at 14:56 +0900, Michael Paquier wrote:\r\n> Looking more closely at that, I actually find a bit crazy the\r\n> requirement for any logging within jsonapi.c just to cope with the\r\n> fact that json_errdetail() and report_parse_error() just want to track\r\n> down if the caller is giving some garbage or not, which should never\r\n> be the case, really. So I would be tempted to eliminate this\r\n> dependency to begin with.\r\n\r\nI think that's a good plan.\r\n\r\n> The second thing is how we should try to handle the way the error\r\n> message gets allocated in json_errdetail(). libpq cannot rely on\r\n> psprintf(),\r\n\r\nThat surprised me. So there's currently no compiler-enforced\r\nprohibition, just a policy? It looks like the bar was lowered a little\r\nbit in commit c0cb87fbb6, as libpq currently has a symbol dependency on\r\npg_get_line_buf() and pfree() on my machine.\r\n\r\n> , so I can think about two options here:\r\n> - Let the caller of json_errdetail() allocate the memory area of the\r\n> error message by itself, pass it down to the function.\r\n> - Do the allocation within json_errdetail(), and let callers cope the\r\n> case where json_errdetail() itself fails on OOM for any frontend code\r\n> using it.\r\n> \r\n> Looking at HEAD, the OAUTH patch and the token handling, the second\r\n> option looks more interesting. I have to admit that handling the\r\n> token part makes the patch a bit special, but that avoids duplicating\r\n> all those error messages for libpq. Please see the idea as attached.\r\n\r\nI prefer the second approach as well. 
Looking at the sample\r\nimplementation -- has an allocating sprintf() for libpq really not been\r\nimplemented before? Doing it ourselves on the stack gives me some\r\nheartburn; at the very least we'll have to make careful use of snprintf\r\nso as to not smash the stack while parsing malicious JSON.\r\n\r\nIf our libpq-specific implementation is going to end up returning NULL\r\non bad allocation anyway, could we just modify the behavior of the\r\nexisting front-end palloc implementation to not exit() from inside\r\nlibpq? That would save a lot of one-off code for future implementers.\r\n\r\n--Jacob\r\n", "msg_date": "Fri, 25 Jun 2021 20:58:46 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On Fri, Jun 25, 2021 at 08:58:46PM +0000, Jacob Champion wrote:\n> On Thu, 2021-06-24 at 14:56 +0900, Michael Paquier wrote:\n>> Looking more closely at that, I actually find a bit crazy the\n>> requirement for any logging within jsonapi.c just to cope with the\n>> fact that json_errdetail() and report_parse_error() just want to track\n>> down if the caller is giving some garbage or not, which should never\n>> be the case, really. So I would be tempted to eliminate this\n>> dependency to begin with.\n> \n> I think that's a good plan.\n\nWe could do this cleanup first, as an independent patch. That's\nsimple enough. I am wondering if we'd better do this bit in 14\nactually, so as the divergence between 15~ and 14 is lightly\nminimized.\n\n>> The second thing is how we should try to handle the way the error\n>> message gets allocated in json_errdetail(). libpq cannot rely on\n>> psprintf(),\n> \n> That surprised me. So there's currently no compiler-enforced\n> prohibition, just a policy? It looks like the bar was lowered a little\n> bit in commit c0cb87fbb6, as libpq currently has a symbol dependency on\n> pg_get_line_buf() and pfree() on my machine.\n\nGood point. 
That's worse than just pfree() which is just a plain call\nto free() in the frontend. We could have more policies here, but my\ntake is that we'd better move fe_memutils.o to OBJS_FRONTEND in\nsrc/common/Makefile so as shared libraries don't use those routines in\nthe long term.\n\nIn parseServiceFile(), initStringInfo() does a palloc() which would\nsimply exit() on OOM, in libpq. That's not good. The service file\nparsing is the only piece in libpq using StringInfoData. @Tom,\n@Daniel, you got involved in c0cb87f. It looks like this piece about\nthe limitations with service file parsing needs a rework. This code\nis new in 14, which means a new open item.\n\n> If our libpq-specific implementation is going to end up returning NULL\n> on bad allocation anyway, could we just modify the behavior of the\n> existing front-end palloc implementation to not exit() from inside\n> libpq? That would save a lot of one-off code for future implementers.\n\nYeah, a side effect of that is to enforce a new rule for any frontend\ncode that calls palloc(), and these could be easily exposed to crashes\nwithin knowing about it until their system is under resource\npressure. Silent breakages with very old guaranteed behaviors is\nbad.\n--\nMichael", "msg_date": "Sat, 26 Jun 2021 09:36:42 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "> On 26 Jun 2021, at 02:36, Michael Paquier <michael@paquier.xyz> wrote:\n\n> The service file parsing is the only piece in libpq using StringInfoData.\n> @Tom, @Daniel, you got involved in c0cb87f. It looks like this piece about the\n> limitations with service file parsing needs a rework. 
This code is new in 14,\n> which means a new open item.\n\n\nReworking it at this point to use a pqexpbuffer would be too invasive for 14\nIMO, so reverting this part seems like the best option, and then redo it with\na pqexpbuffer for 15.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Sat, 26 Jun 2021 10:13:38 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "Michael Paquier <michael@paquier.xyz> writes:\n> On Fri, Jun 25, 2021 at 08:58:46PM +0000, Jacob Champion wrote:\n>> That surprised me. So there's currently no compiler-enforced\n>> prohibition, just a policy? It looks like the bar was lowered a little\n>> bit in commit c0cb87fbb6, as libpq currently has a symbol dependency on\n>> pg_get_line_buf() and pfree() on my machine.\n\n> Good point. That's worse than just pfree() which is just a plain call\n> to free() in the frontend. We could have more policies here, but my\n> take is that we'd better move fe_memutils.o to OBJS_FRONTEND in\n> src/common/Makefile so as shared libraries don't use those routines in\n> the long term.\n\nUgh. Not only is that bad, but your proposed fix doesn't fix it.\nAt least in psql, and probably in most/all of our other clients,\nremoving fe_memutils.o from libpq's link just causes it to start\nrelying on the copy in the psql executable :-(. So I agree that\nsome sort of mechanical enforcement would be a really good thing,\nbut I'm not sure what it would look like.\n\n> In parseServiceFile(), initStringInfo() does a palloc() which would\n> simply exit() on OOM, in libpq. That's not good. The service file\n> parsing is the only piece in libpq using StringInfoData. @Tom,\n> @Daniel, you got involved in c0cb87f.\n\nI concur with Daniel that the easiest fix for v14 is to revert\nc0cb87f. Allowing unlimited-length lines in the service file seems\nlike a nice-to-have, but it's not worth a lot. 
(Looking at the patch,\nI'm inclined to keep much of the code rearrangement, just remove the\ndependency on stringinfo.c. Also I'm tempted to set the fixed buffer\nsize at 1024 not 256, after which we might never need to improve it.)\n\nI spent some time looking for other undesirable symbol dependencies\nin libpq, and soon found a couple. PGTHREAD_ERROR potentially calls\nabort(), which seems even worse than exit-on-OOM, although I don't\nthink we've ever heard a report of that being hit. Also,\nfe-print.c's handling of OOM isn't nice at all:\n\n fprintf(stderr, libpq_gettext(\"out of memory\\n\"));\n abort();\n\nAlthough fe-print.c is semi-deprecated, it still seems like it'd\nbe a good idea to clean that up.\n\nBTW, so far as the original topic of this thread is concerned,\nit sounds like Jacob's ultimate goal is to put some functionality\ninto libpq that requires JSON parsing. I'm going to say up front\nthat that sounds like a terrible idea. As we've just seen, libpq\noperates under very tight constraints, not all of which are\nmechanically enforced. I am really doubtful that anything that\nwould require a JSON parser has any business being in libpq.\nUnless you can sell us on that point, I do not think it's worth\ncomplicating the src/common JSON code to the point where it can\nwork under libpq's constraints.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 26 Jun 2021 13:43:50 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "I wrote:\n> I spent some time looking for other undesirable symbol dependencies\n> in libpq, and soon found a couple. PGTHREAD_ERROR potentially calls\n> abort(), which seems even worse than exit-on-OOM, although I don't\n> think we've ever heard a report of that being hit. 
Also,\n> fe-print.c's handling of OOM isn't nice at all:\n> fprintf(stderr, libpq_gettext(\"out of memory\\n\"));\n> abort();\n> Although fe-print.c is semi-deprecated, it still seems like it'd\n> be a good idea to clean that up.\n\nfe-print.c seems easy enough to clean up, as per attached.\nNot real sure what to do about PGTHREAD_ERROR.\n\n\t\t\tregards, tom lane", "msg_date": "Sat, 26 Jun 2021 15:22:43 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "I wrote:\n> Not real sure what to do about PGTHREAD_ERROR.\n\nI wonder if we shouldn't simply nuke that macro and change the\ncall sites into \"Assert(false)\". The only call sites are in\ndefault_threadlock() (used in fe_auth.c) and pq_lockingcallback()\n(for OpenSSL). I suggest that\n\n1. \"fprintf(stderr)\" in these locking functions doesn't seem\nremarkably well-advised either. Especially not on Windows;\nbut in general, we don't expect libpq to emit stuff on stderr\nexcept under *very* limited circumstances.\n\n2. In an assert-enabled build, Assert() ought to be about equivalent\nto abort().\n\n3. In a production build, if one of these mutex calls fails, ignoring\nthe failure might be the best thing to do anyway. 
Certainly, dumping\ncore is the worst possible outcome, while not doing anything would\nhave no impact except in the rather-unlikely case that multiple libpq\nconnections try to use this code concurrently.\n\nIt's certainly possible to quibble about point 3, but unless you\nhave a better alternative to offer, I don't think you have a lot\nof room to complain.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 26 Jun 2021 18:21:49 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On Sat, Jun 26, 2021 at 01:43:50PM -0400, Tom Lane wrote:\n> BTW, so far as the original topic of this thread is concerned,\n> it sounds like Jacob's ultimate goal is to put some functionality\n> into libpq that requires JSON parsing. I'm going to say up front\n> that that sounds like a terrible idea. As we've just seen, libpq\n> operates under very tight constraints, not all of which are\n> mechanically enforced. 
I am really doubtful that anything that\n> would require a JSON parser has any business being in libpq.\n> Unless you can sell us on that point, I do not think it's worth\n> complicating the src/common JSON code to the point where it can\n> work under libpq's constraints.\n\nAFAIK, the SASL mechanism OAUTHBEARER described in RFC 7628 would\nrequire such facilities as failures are reported in this format:\nhttps://datatracker.ietf.org/doc/html/rfc7628\n\nPerhaps you are right and we have no need to do any json parsing in\nlibpq as long as we pass down the JSON blob, but I am not completely\nsure if we can avoid that either.\n\nSeparate topic: I find disturbing the dependency of jsonapi.c to\nthe logging parts just to cope with dummy error values that are\noriginally part of JsonParseErrorType.\n--\nMichael", "msg_date": "Sun, 27 Jun 2021 10:43:00 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "I wrote:\n>> Not real sure what to do about PGTHREAD_ERROR.\n\n> I wonder if we shouldn't simply nuke that macro and change the\n> call sites into \"Assert(false)\".\n\nAfter further study this still seems like the best available choice.\nWe do not have the option to make either default_threadlock() or\npq_lockingcallback() do something saner, like return a failure\nindication. pq_lockingcallback()'s API is dictated by OpenSSL,\nwhile default_threadlock()'s API is exposed to users by libpq\n(IOW, we could have gotten that one right years ago, but we\nfailed to, and it seems much too late to change it now).\n\nAlso, I trawled the mailing list archives, and I can find no\nindication that any of the PGTHREAD_ERROR messages have ever\nbeen seen in the field. 
The last relevant discussion seems\nto be in\n\nhttps://www.postgresql.org/message-id/flat/20130801142443.GO2706%40tamriel.snowman.net\n\nwhere it was observed that this code isn't very well thought\nthrough :-(\n\nMy proposal is to replace PGTHREAD_ERROR by Assert(false)\nin HEAD, but leave things alone in the back branches.\n\nAs far as the other patch to check for mistakes with \"nm\"\ngoes, we could either do nothing in the back branches, or\ninstall a check for \"exit\" only, not \"abort\". But there's\nprobably no real need for such a check in the back branches\nas long as we're enforcing it in HEAD.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 15:15:47 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "> On 28 Jun 2021, at 21:15, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n\n> I wrote:\n>>> Not real sure what to do about PGTHREAD_ERROR.\n> \n>> I wonder if we shouldn't simply nuke that macro and change the\n>> call sites into \"Assert(false)\".\n> \n> After further study this still seems like the best available choice.\n\nWhile this solution has a potential downside as you mention upthread, I can't\nsee any better alternative, and this is clearly better than what we have now.\n\n> My proposal is to replace PGTHREAD_ERROR by Assert(false)\n> in HEAD, but leave things alone in the back branches.\n\n+1\n\n> As far as the other patch to check for mistakes with \"nm\"\n> goes, we could either do nothing in the back branches, or\n> install a check for \"exit\" only, not \"abort\". But there's\n> probably no real need for such a check in the back branches\n> as long as we're enforcing it in HEAD.\n\nI don't see any real reason to backport the check, but enforce it in HEAD going\nforward. 
The risk of introducing an exit in backbranches when enforced against\nin HEAD seem pretty manageable.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 22:08:10 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On Sun, 2021-06-27 at 10:43 +0900, Michael Paquier wrote:\r\n> On Sat, Jun 26, 2021 at 01:43:50PM -0400, Tom Lane wrote:\r\n> > BTW, so far as the original topic of this thread is concerned,\r\n> > it sounds like Jacob's ultimate goal is to put some functionality\r\n> > into libpq that requires JSON parsing. I'm going to say up front\r\n> > that that sounds like a terrible idea. As we've just seen, libpq\r\n> > operates under very tight constraints, not all of which are\r\n> > mechanically enforced. I am really doubtful that anything that\r\n> > would require a JSON parser has any business being in libpq.\r\n> > Unless you can sell us on that point, I do not think it's worth\r\n> > complicating the src/common JSON code to the point where it can\r\n> > work under libpq's constraints.\r\n> \r\n> AFAIK, the SASL mechanism OAUTHBEARER described in RFC 7628 would\r\n> require such facilities as failures are reported in this format:\r\n> https://datatracker.ietf.org/doc/html/rfc7628\r\n\r\nRight. 
So it really comes down to whether or not OAUTHBEARER support is\r\nworth this additional complication, and that probably belongs to the\r\nmain thread on the topic.\r\n\r\nBut hey, we're getting some code cleanup out of the deal either way.\r\n\r\n> Perhaps you are right and we have no need to do any json parsing in\r\n> libpq as long as we pass down the JSON blob, but I am not completely\r\n> sure if we can avoid that either.\r\n\r\nIt is definitely an option.\r\n\r\nWith the current architecture of the proof-of-concept, I feel like\r\nforcing every client to implement JSON parsing just to be able to use\r\nOAUTHBEARER would be a significant barrier to entry. Again, that's\r\nprobably conversation for the main thread.\r\n\r\n> Separate topic: I find disturbing the dependency of jsonapi.c to\r\n> the logging parts just to cope with dummy error values that are\r\n> originally part of JsonParseErrorType.\r\n\r\nI think we should clean this up regardless.\r\n\r\n--Jacob\r\n", "msg_date": "Tue, 29 Jun 2021 18:09:43 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On Sat, 2021-06-26 at 09:36 +0900, Michael Paquier wrote:\r\n> On Fri, Jun 25, 2021 at 08:58:46PM +0000, Jacob Champion wrote:\r\n> > On Thu, 2021-06-24 at 14:56 +0900, Michael Paquier wrote:\r\n> > > Looking more closely at that, I actually find a bit crazy the\r\n> > > requirement for any logging within jsonapi.c just to cope with the\r\n> > > fact that json_errdetail() and report_parse_error() just want to track\r\n> > > down if the caller is giving some garbage or not, which should never\r\n> > > be the case, really. So I would be tempted to eliminate this\r\n> > > dependency to begin with.\r\n> > \r\n> > I think that's a good plan.\r\n> \r\n> We could do this cleanup first, as an independent patch. That's\r\n> simple enough. 
I am wondering if we'd better do this bit in 14\r\n> actually, so as the divergence between 15~ and 14 is lightly\r\n> minimized.\r\n\r\nUp to you in the end; I don't have a good intuition for whether the\r\ncode motion would be worth it for 14, if it's not actively used.\r\n\r\n> > > The second thing is how we should try to handle the way the error\r\n> > > message gets allocated in json_errdetail(). libpq cannot rely on\r\n> > > psprintf(),\r\n> > \r\n> > That surprised me. So there's currently no compiler-enforced\r\n> > prohibition, just a policy? It looks like the bar was lowered a little\r\n> > bit in commit c0cb87fbb6, as libpq currently has a symbol dependency on\r\n> > pg_get_line_buf() and pfree() on my machine.\r\n\r\nThis seems to have spawned an entirely new thread over the weekend,\r\nwhich I will watch with interest. :)\r\n\r\n> > If our libpq-specific implementation is going to end up returning NULL\r\n> > on bad allocation anyway, could we just modify the behavior of the\r\n> > existing front-end palloc implementation to not exit() from inside\r\n> > libpq? That would save a lot of one-off code for future implementers.\r\n> \r\n> Yeah, a side effect of that is to enforce a new rule for any frontend\r\n> code that calls palloc(), and these could be easily exposed to crashes\r\n> within knowing about it until their system is under resource\r\n> pressure. Silent breakages with very old guaranteed behaviors is\r\n> bad.\r\n\r\nFair point.\r\n\r\nWhat would you think about a src/port of asprintf()? 
Maybe libpq\r\ndoesn't change quickly enough to worry about it, but having developers\r\nrevisit stack allocation for strings every time they target the libpq\r\nparts of the code seems like a recipe for security problems.\r\n\r\n--Jacob\r\n", "msg_date": "Tue, 29 Jun 2021 18:09:54 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "Jacob Champion <pchampion@vmware.com> writes:\n> What would you think about a src/port of asprintf()? Maybe libpq\n> doesn't change quickly enough to worry about it, but having developers\n> revisit stack allocation for strings every time they target the libpq\n> parts of the code seems like a recipe for security problems.\n\nThe existing convention is to use pqexpbuffer.c, which seems strictly\ncleaner and more robust than asprintf. In particular its behavior under\nOOM conditions is far easier/safer to work with. Maybe we should consider\nmoving that into src/common/ so that it can be used by code that's not\ntightly bound into libpq?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 29 Jun 2021 14:50:20 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On Tue, 2021-06-29 at 14:50 -0400, Tom Lane wrote:\r\n> Jacob Champion <pchampion@vmware.com> writes:\r\n> > What would you think about a src/port of asprintf()? Maybe libpq\r\n> > doesn't change quickly enough to worry about it, but having developers\r\n> > revisit stack allocation for strings every time they target the libpq\r\n> > parts of the code seems like a recipe for security problems.\r\n> \r\n> The existing convention is to use pqexpbuffer.c, which seems strictly\r\n> cleaner and more robust than asprintf. In particular its behavior under\r\n> OOM conditions is far easier/safer to work with. 
Maybe we should consider\r\n> moving that into src/common/ so that it can be used by code that's not\r\n> tightly bound into libpq?\r\n\r\nI will take a look. Were you thinking we'd (hypothetically) migrate all\r\nstring allocation code under src/common to pqexpbuffer as part of that\r\nmove? Or just have it there to use as needed, when nm complains?\r\n\r\n--Jacob\r\n", "msg_date": "Tue, 29 Jun 2021 19:26:47 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "Jacob Champion <pchampion@vmware.com> writes:\n> On Tue, 2021-06-29 at 14:50 -0400, Tom Lane wrote:\n>> The existing convention is to use pqexpbuffer.c, which seems strictly\n>> cleaner and more robust than asprintf. In particular its behavior under\n>> OOM conditions is far easier/safer to work with. Maybe we should consider\n>> moving that into src/common/ so that it can be used by code that's not\n>> tightly bound into libpq?\n\n> I will take a look. Were you thinking we'd (hypothetically) migrate all\n> string allocation code under src/common to pqexpbuffer as part of that\n> move? Or just have it there to use as needed, when nm complains?\n\nActually, I'd forgotten that the PQExpBuffer functions are already\nexported by libpq, and much of our frontend code already uses them\nfrom there. 
So we don't really need to move anything unless there's\na call to use this code in clients that don't use libpq, which are\na pretty small set.\n\nAlso, having them be available both from libpq.so and from libpgcommon.a\nwould be a tad problematic I think; it'd be hard to tell which way the\nlinker would choose to resolve that.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 29 Jun 2021 15:34:29 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On 26.06.21 19:43, Tom Lane wrote:\n> I spent some time looking for other undesirable symbol dependencies\n> in libpq, and soon found a couple. PGTHREAD_ERROR potentially calls\n> abort(), which seems even worse than exit-on-OOM, although I don't\n> think we've ever heard a report of that being hit. Also,\n> fe-print.c's handling of OOM isn't nice at all:\n> \n> fprintf(stderr, libpq_gettext(\"out of memory\\n\"));\n> abort();\n> \n> Although fe-print.c is semi-deprecated, it still seems like it'd\n> be a good idea to clean that up.\n\nThese abort() calls were put there on purpose by:\n\ncommit c6ea8ccea6bf23501962ddc7ac9ffdb99c8643e1\nAuthor: Peter Eisentraut <peter_e@gmx.net>\nDate: Mon Jan 30 20:34:00 2012\n\n Use abort() instead of exit() to abort library functions\n\n In some hopeless situations, certain library functions in libpq and\n libpgport quit the program. Use abort() for that instead of exit(),\n so we don't interfere with the normal exit codes the program might\n use, we clearly signal the abnormal termination, and the caller has a\n chance of catching the termination.\n\n This was originally pointed out by Debian's Lintian program.\n\n\nI don't object to refining this, but I think it is a mischaracterization \nto calls this kind of code wrong. 
And I'm dubious about the backpatching.\n\n\n", "msg_date": "Wed, 30 Jun 2021 19:13:16 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n> On 26.06.21 19:43, Tom Lane wrote:\n>> fe-print.c's handling of OOM isn't nice at all:\n>> \tfprintf(stderr, libpq_gettext(\"out of memory\\n\"));\n>> \tabort();\n>> Although fe-print.c is semi-deprecated, it still seems like it'd\n>> be a good idea to clean that up.\n\n> These abort() calls were put there on purpose by:\n> commit c6ea8ccea6bf23501962ddc7ac9ffdb99c8643e1\n> Use abort() instead of exit() to abort library functions\n\nWell, the exit() calls that that replaced were quite inappropriate\ntoo IMO. I don't think it boots much to argue about which way was\nless bad; libpq has no business doing either thing.\n\nWhat might be more useful is to consider whether there's a way\nto retrofit an error-reporting convention onto these functions.\nI thought about that for a bit, but concluded that the possible\ninteractions with stdio's error handling made that fairly tricky,\nand it didn't seem worth messing with for such backwater code.\n(Too bad POSIX didn't see fit to provide seterr(FILE*), or maybe\nwe could have reported OOM in fe-print that way.)\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 16:13:56 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On Tue, Jun 29, 2021 at 03:34:29PM -0400, Tom Lane wrote:\n> Actually, I'd forgotten that the PQExpBuffer functions are already\n> exported by libpq, and much of our frontend code already uses them\n> from there. 
So we don't really need to move anything unless there's\n> a call to use this code in clients that don't use libpq, which are\n> a pretty small set.\n> \n> Also, having them be available both from libpq.so and from libpgcommon.a\n> would be a tad problematic I think; it'd be hard to tell which way the\n> linker would choose to resolve that.\n\nComing back to this thread. You were thinking about switching to\nPQExpBuffer for the error strings generated depending on error code\nfor the frontend, right? Using a PQExpBuffer would be a problem if\nattempting to get a more detailed error for pg_verifybackup, though I\nguess that we can continue to live in this tool without this much\namount of details in the error strings.\n\nIt seems to me that this does not address yet the problems with the\npalloc/pstrdup in jsonapi.c though, which would need to rely on\nmalloc() if we finish to use this code in libpq. I am not sure yet\nthat we have any need to do that yet as we may finish by not using\nOAUTH as SASL mechanism at the end in core. So perhaps it would be\nbetter to just give up on this thread for now?\n--\nMichael", "msg_date": "Wed, 7 Jul 2021 14:36:14 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "Michael Paquier <michael@paquier.xyz> writes:\n> It seems to me that this does not address yet the problems with the\n> palloc/pstrdup in jsonapi.c though, which would need to rely on\n> malloc() if we finish to use this code in libpq. I am not sure yet\n> that we have any need to do that yet as we may finish by not using\n> OAUTH as SASL mechanism at the end in core. So perhaps it would be\n> better to just give up on this thread for now?\n\nYeah, I think there's nothing to do here unless we decide that we\nhave to have JSON-parsing ability inside libpq ... 
which is a\nsituation I think we should try hard to avoid.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 07 Jul 2021 01:42:00 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" }, { "msg_contents": "On Wed, 2021-07-07 at 01:42 -0400, Tom Lane wrote:\r\n> Michael Paquier <michael@paquier.xyz> writes:\r\n> > It seems to me that this does not address yet the problems with the\r\n> > palloc/pstrdup in jsonapi.c though, which would need to rely on\r\n> > malloc() if we finish to use this code in libpq. I am not sure yet\r\n> > that we have any need to do that yet as we may finish by not using\r\n> > OAUTH as SASL mechanism at the end in core. So perhaps it would be\r\n> > better to just give up on this thread for now?\r\n> \r\n> Yeah, I think there's nothing to do here unless we decide that we\r\n> have to have JSON-parsing ability inside libpq ... which is a\r\n> situation I think we should try hard to avoid.\r\n\r\nI'm working on a corrected version of the allocation for the OAuth\r\nproof of concept, so we can see what it might look like there. I will\r\nwithdraw this one from the commitfest. Thanks for all the feedback!\r\n\r\n--Jacob\r\n", "msg_date": "Wed, 7 Jul 2021 14:57:53 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Make jsonapi usable from libpq" } ]
[ { "msg_contents": "Hi,\n\nWhen directly INSERT INTO partition, postgres will invoke ExecPartitionCheck\nwhich will execute its parent's and grandparent's partition constraint check.\n From the code, the whole constraint check is saved in relcache::rd_partcheck.\n\nFor a multi-level partition, for example: table 'A' is partition of table\n'B', and 'B' is also partition of table 'C'. After I 'ALTER TABLE C DETACH B',\nI thought partition constraint check of table 'C' does not matter anymore if\nINSERT INTO table 'A'. But it looks like the relcache of 'A' is not invalidated\nafter detaching 'B'. And the relcache::rd_partcheck still include the partition\nconstraint of table 'C'. Note If I invalidate the table 'A''s relcache manually,\nthen next time the relcache::rd_partcheck will be updated to the expected\none which does not include partition constraint check of table 'C'.\n(ATTACH partition has the same behaviour that relcache::rd_partcheck will\nnot be updated immediately)\n\nDoes it work as expected ? I didn't find some explanation from the doc.\n(sorry if I missed something).\n\nBest regards,\nhouzj\n\n\n", "msg_date": "Wed, 23 Jun 2021 04:16:10 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": true, "msg_subject": "Partition Check not updated when insert into a partition" }, { "msg_contents": "On Wednesday, June 23, 2021 12:16 PM I wrote:\n> When directly INSERT INTO partition, postgres will invoke ExecPartitionCheck\n> which will execute its parent's and grandparent's partition constraint check.\n> From the code, the whole constraint check is saved in relcache::rd_partcheck.\n> \n> For a multi-level partition, for example: table 'A' is partition of table 'B', and 'B'\n> is also partition of table 'C'. After I 'ALTER TABLE C DETACH B', I thought\n> partition constraint check of table 'C' does not matter anymore if INSERT INTO\n> table 'A'. 
But it looks like the relcache of 'A' is not invalidated after detaching 'B'.\n> And the relcache::rd_partcheck still include the partition constraint of table 'C'.\n> Note If I invalidate the table 'A''s relcache manually, then next time the\n> relcache::rd_partcheck will be updated to the expected one which does not\n> include partition constraint check of table 'C'.\n> (ATTACH partition has the same behaviour that relcache::rd_partcheck will not\n> be updated immediately)\n\nAn DETACH PARTITION example which shows the relcache::rd_partcheck\nis not invalidated immediately is:\n\n----- parttable1 -> parttable2-> parttable3\ncreate table parttable1 (a int, b int, c int) partition by list(a);\ncreate table parttable2 (a int, b int, c int) partition by list(b);\ncreate table parttable3 (a int, b int, c int);\nalter table parttable1 attach partition parttable2 for values in (1);\nalter table parttable2 attach partition parttable3 for values in (1);\n\n-----\n-----INSERT a tuple into parttable3 which does not satisfy parttable1's partition constraint\n-----we will get an error\n-----\ninsert into parttable3 values(2,1,1);\nERROR: new row for relation \"parttable3\" violates partition constraint\nDETAIL: Failing row contains (2, 1, 1).\n\n-----\n----- parttable1 is no longer the grandparent of parttable3.\n----- I thought the partition constraint of parttable1 does not matter anymore\n-----\nalter table parttable1 detach partition parttable2;\n\n-----\n-----INSERT a tuple into parttable3 which does not satisfy parttable1's partition constraint\n----- *** I expect a successful insertion, but it returns an error again. 
***\n-----\ninsert into parttable3 values(2,1,1);\nERROR: new row for relation \"parttable3\" violates partition constraint\nDETAIL: Failing row contains (2, 1, 1).\n\nRECONNECT\n-----\n-----Reconnect the postgres which will invalidate the relcache\n----- INSERT a tuple into parttable3 which does not satisfy parttable1's partition constraint\n----- We succeeded this time as expected.\n-----\ninsert into parttable3 values(2,1,1);\nINSERT 0 1\n\n\nBest regards,\nhouzj\n\n\n\n\n", "msg_date": "Wed, 23 Jun 2021 06:40:17 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: Partition Check not updated when insert into a partition" }, { "msg_contents": "On 2021-Jun-23, houzj.fnst@fujitsu.com wrote:\n\n> For a multi-level partition, for example: table 'A' is partition of table\n> 'B', and 'B' is also partition of table 'C'. After I 'ALTER TABLE C DETACH B',\n> I thought partition constraint check of table 'C' does not matter anymore if\n> INSERT INTO table 'A'. But it looks like the relcache of 'A' is not invalidated\n> after detaching 'B'. And the relcache::rd_partcheck still include the partition\n> constraint of table 'C'. Note If I invalidate the table 'A''s relcache manually,\n> then next time the relcache::rd_partcheck will be updated to the expected\n> one which does not include partition constraint check of table 'C'.\n\nHmm, if I understand correctly, this means that we need to invalidate\nrelcache for all partitions of the partition being detached. 
Maybe like\nin the attached WIP (\"XXX VERY CRUDE XXX DANGER EATS DATA\") patch, which\nsolves what you complained about, but I didn't run any other tests.\n(Also, in the concurrent case I think this should be done during the\nfirst transaction, so this patch is wrong for it.)\n\nDid you have a misbehaving test for the ATTACH case?\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"I dream about dreams about dreams\", sang the nightingale\nunder the pale moon (Sandman)", "msg_date": "Mon, 12 Jul 2021 14:51:37 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "On Tuesday, July 13, 2021 2:52 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\r\n> On 2021-Jun-23, houzj.fnst@fujitsu.com wrote:\r\n> \r\n> > For a multi-level partition, for example: table 'A' is partition of\r\n> > table 'B', and 'B' is also partition of table 'C'. After I 'ALTER\r\n> > TABLE C DETACH B', I thought partition constraint check of table 'C'\r\n> > does not matter anymore if INSERT INTO table 'A'. But it looks like\r\n> > the relcache of 'A' is not invalidated after detaching 'B'. And the\r\n> > relcache::rd_partcheck still include the partition constraint of table\r\n> > 'C'. Note If I invalidate the table 'A''s relcache manually, then next\r\n> > time the relcache::rd_partcheck will be updated to the expected one which\r\n> does not include partition constraint check of table 'C'.\r\n> \r\n> Hmm, if I understand correctly, this means that we need to invalidate relcache\r\n> for all partitions of the partition being detached. 
Maybe like in the attached\r\n> WIP (\"XXX VERY CRUDE XXX DANGER EATS DATA\") patch, which solves what\r\n> you complained about, but I didn't run any other tests.\r\n> (Also, in the concurrent case I think this should be done during the first\r\n> transaction, so this patch is wrong for it.)\r\n> \r\n> Did you have a misbehaving test for the ATTACH case?\r\n\r\nThanks for the response.\r\n\r\nYes, I think the following example of ATTACH doesn't work as expected.\r\n\r\n---------------------------------------------------------------\r\ncreate table parttable1 (a int, b int, c int) partition by list(a);\r\ncreate table parttable2 (a int, b int, c int) partition by list(b);\r\ncreate table parttable3 (a int, b int, c int);\r\nalter table parttable2 attach partition parttable3 for values in (1);\r\n\r\n-----\r\n----- INSERT a tuple into parttable3\r\n----- Cache the partitioncheck in relcache::rd_partcheck\r\n-----\r\ninsert into parttable3 values(1, 1, 0);\r\n\r\n----- Attach a new top parent\r\nalter table parttable1 attach partition parttable2 for values in (1);\r\n\r\n-----\r\n----- INSERT a tuple which doesn't satisfy the new top parent(parttable1)'s partitioncheck\r\n----- But the INSERT will succeed which looks not as expected.\r\n-----\r\ninsert into parttable3 values(999, 1, 0);\r\n\r\n-----\r\n----- And when I reconnect to clean the cache\r\n----- INSERT a tuple which doesn't satisfy the new top parent(parttable1)'s partitioncheck\r\n----- INSERT will fail due to partition check violation.\r\n-----\r\ninsert into parttable3 values(999, 1, 0);\r\n\r\nBest regards,\r\nHou zhijie\r\n", "msg_date": "Wed, 14 Jul 2021 02:15:49 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: Partition Check not updated when insert into a partition" }, { "msg_contents": "Sorry that I missed this thread.\n\nOn Wed, Jul 14, 2021 at 11:16 AM houzj.fnst@fujitsu.com\n<houzj.fnst@fujitsu.com> wrote:\n> On Tuesday, July 
13, 2021 2:52 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > On 2021-Jun-23, houzj.fnst@fujitsu.com wrote:\n> >\n> > > For a multi-level partition, for example: table 'A' is partition of\n> > > table 'B', and 'B' is also partition of table 'C'. After I 'ALTER\n> > > TABLE C DETACH B', I thought partition constraint check of table 'C'\n> > > does not matter anymore if INSERT INTO table 'A'. But it looks like\n> > > the relcache of 'A' is not invalidated after detaching 'B'. And the\n> > > relcache::rd_partcheck still include the partition constraint of table\n> > > 'C'. Note If I invalidate the table 'A''s relcache manually, then next\n> > > time the relcache::rd_partcheck will be updated to the expected one which\n> > does not include partition constraint check of table 'C'.\n> >\n> > Hmm, if I understand correctly, this means that we need to invalidate relcache\n> > for all partitions of the partition being detached. Maybe like in the attached\n> > WIP (\"XXX VERY CRUDE XXX DANGER EATS DATA\") patch, which solves what\n> > you complained about, but I didn't run any other tests.\n> > (Also, in the concurrent case I think this should be done during the first\n> > transaction, so this patch is wrong for it.)\n> >\n> > Did you have a misbehaving test for the ATTACH case?\n>\n> Thanks for the response.\n\nThank you both.\n\n> Yes, I think the following example of ATTACH doesn't work as expected.\n\nYeah, need the fix for the ATTACH case too.\n\nCouple more things:\n\n* We must invalidate not just the \"direct\" partitions of the table\nbeing attached/detached, but also any indirect ones, because all of\ntheir partition constraints would need to contain (or no longer\ncontain) the root parent's partition constraint.\n\n* I think we should lock the partitions before sending the\ninvalidation. 
The ATTACH code already locks the descendents for a\ndifferent purpose, but DETACH doesn't, so the latter needs to be fixed\nto match.\n\nI've updated Alvaro's patch to address these points. Maybe, we should\nalso add these cases to the regression and isolation suites?\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Thu, 5 Aug 2021 11:32:58 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "On Thu, Aug 5, 2021 at 11:32 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Wed, Jul 14, 2021 at 11:16 AM houzj.fnst@fujitsu.com\n> > On Tuesday, July 13, 2021 2:52 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > > Did you have a misbehaving test for the ATTACH case?\n> >\n> > Thanks for the response.\n>\n> Thank you both.\n>\n> > Yes, I think the following example of ATTACH doesn't work as expected.\n>\n> Yeah, need the fix for the ATTACH case too.\n>\n> Couple more things:\n>\n> * We must invalidate not just the \"direct\" partitions of the table\n> being attached/detached, but also any indirect ones, because all of\n> their partition constraints would need to contain (or no longer\n> contain) the root parent's partition constraint.\n>\n> * I think we should lock the partitions before sending the\n> invalidation. The ATTACH code already locks the descendents for a\n> different purpose, but DETACH doesn't, so the latter needs to be fixed\n> to match.\n>\n> I've updated Alvaro's patch to address these points. 
Maybe, we should\n> also add these cases to the regression and isolation suites?\n\nApparently, I had posted a version of the patch that didn't even compile.\n\nI have fixed that in the attached and also added regression tests.\n\nAdding this to the next CF.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 27 Aug 2021 18:20:11 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "I have reviewed the patch and it looks good to me. However I have one comment.\n\n+ foreach(l, attachrel_children)\n+ {\n+ Oid partOid = lfirst_oid(l);\n+\n+ CacheInvalidateRelcacheByRelid(partOid);\n+ }\n\nCan we avoid using the extra variable 'partOid' and directly pass\n'lfirst_oid(l)' to CacheInvalidateRelcacheByRelid().\n\nThanks & Regards,\nNitin Jadhav\n\nOn Fri, Aug 27, 2021 at 2:50 PM Amit Langote <amitlangote09@gmail.com> wrote:\n>\n> On Thu, Aug 5, 2021 at 11:32 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> > On Wed, Jul 14, 2021 at 11:16 AM houzj.fnst@fujitsu.com\n> > > On Tuesday, July 13, 2021 2:52 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > > > Did you have a misbehaving test for the ATTACH case?\n> > >\n> > > Thanks for the response.\n> >\n> > Thank you both.\n> >\n> > > Yes, I think the following example of ATTACH doesn't work as expected.\n> >\n> > Yeah, need the fix for the ATTACH case too.\n> >\n> > Couple more things:\n> >\n> > * We must invalidate not just the \"direct\" partitions of the table\n> > being attached/detached, but also any indirect ones, because all of\n> > their partition constraints would need to contain (or no longer\n> > contain) the root parent's partition constraint.\n> >\n> > * I think we should lock the partitions before sending the\n> > invalidation. 
The ATTACH code already locks the descendents for a\n> > different purpose, but DETACH doesn't, so the latter needs to be fixed\n> > to match.\n> >\n> > I've updated Alvaro's patch to address these points. Maybe, we should\n> > also add these cases to the regression and isolation suites?\n>\n> Apparently, I had posted a version of the patch that didn't even compile.\n>\n> I have fixed that in the attached and also added regression tests.\n>\n> Adding this to the next CF.\n>\n> --\n> Amit Langote\n> EDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 27 Aug 2021 20:16:33 +0530", "msg_from": "Nitin Jadhav <nitinjadhavpostgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "Hi, hackers!\n\nWe've reviewed patch v3 and found it right. Completely agree that in case\nwe attach/detach partition relcaches for all child relations (if they\nexist) need invalidation.\nInstallcheck world succeeds after the patch. Tests from the patch fail as\nthey should when run on the master branch. Found no problems.\n\nOverall patch is good and I'd recommend it to be committed.\n\nWe've made v4 patch according to Nitin's advice and tested it, but still\nhave no objections to patch v3. Each can be committed, equally good.\n\nBig thanks to you, Álvaro and Amit for working on this!\n\n-- \nBest regards,\nPavel Borisov, Maxim Orlov\n\nPostgres Professional: http://postgrespro.com <http://www.postgrespro.com>", "msg_date": "Wed, 6 Oct 2021 17:40:37 +0400", "msg_from": "Pavel Borisov <pashkin.elfe@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "On Wed, Oct 6, 2021 at 10:40 PM Pavel Borisov <pashkin.elfe@gmail.com> wrote:\n>\n> Hi, hackers!\n>\n> We've reviewed patch v3 and found it right. 
Completely agree that in case we attach/detach partition relcaches for all child relations (if they exist) need invalidation.\n> Installcheck world succeeds after the patch. Tests from the patch fail as they should when run on the master branch. Found no problems.\n>\n> Overall patch is good and I'd recommend it to be committed.\n>\n> We've made v4 patch according to Nitin's advice and tested it, but still have no objections to patch v3. Each can be committed, equally good.\n\nThanks Pavel, Nitin for your reviews.\n\nI was looking again at the following hunk in the patch and started\nwondering if the lockmode for the children in\nDetachPartitionFinalize() shouldn't be the same as used for the parent\nmentioned in the DETACH PARTITION command:\n\n@@ -18150,6 +18168,26 @@ DetachPartitionFinalize(Relation rel,\nRelation partRel, bool concurrent,\n * included in its partition descriptor.\n */\n CacheInvalidateRelcache(rel);\n+\n+ /*\n+ * If the partition we just detached is partitioned itself, invalidate\n+ * relcache for all descendent partitions too to ensure that their\n+ * rd_partcheck expression trees are rebuilt; must lock partitions\n+ * before doing so.\n+ */\n+ if (partRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)\n+ {\n+ List *partRel_children =\n+ find_all_inheritors(RelationGetRelid(partRel),\n+ AccessExclusiveLock, NULL);\n\nThe lock taken on the parent is either ShareUpdateExclusiveLock or\nAccessExclusiveLock depending on whether CONCURRENTLY is specified or\nnot. Maybe that should be considered also when locking the children.\n\nI've updated the patch that way. (Also, reintroduced the slightly\nlonger commit message that I had added in v3. 
:))\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 15 Oct 2021 16:13:19 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": ">\n> The lock taken on the parent is either ShareUpdateExclusiveLock or\n> AccessExclusiveLock depending on whether CONCURRENTLY is specified or\n> not. Maybe that should be considered also when locking the children.\n>\n> I've updated the patch that way. (Also, reintroduced the slightly\n> longer commit message that I had added in v3. :))\n>\n\nThanks Amit, for your work!\n\nI am little bit reluctant to the change you made in v5. As per\nhttps://www.postgresql.org/docs/14/sql-altertable.html:\n\n> If CONCURRENTLY is specified, ... the second transaction acquires SHARE\nUPDATE EXCLUSIVE on the partitioned table and ACCESS EXCLUSIVE on the\npartition, and the detach process completes.\n\nIn comment to find_all_inheritors():\n\n> The specified lock type is acquired on all child relations (but not on\nthe given rel; caller should already have locked it)\n\nSo I conclude that it is done in a right way in v3 with ACCESS_EXCLUSIVE\nlock.\n\nAlso I'd recommend removing the link to a discussion from the test. Anyway\nwe have link in a commit message.\n-- Report:\nhttps://postgr.es/m/OS3PR01MB5718DA1C4609A25186D1FBF194089%40OS3PR01MB5718.jpnprd01.prod.outlook.com\n\n--\nBest regards,\nPavel Borisov\n\nPostgres Professional: http://postgrespro.com <http://www.postgrespro.com>\n\nThe lock taken on the parent is either ShareUpdateExclusiveLock or\nAccessExclusiveLock depending on whether CONCURRENTLY is specified or\nnot.  Maybe that should be considered also when locking the children.\n\nI've updated the patch that way.  (Also, reintroduced the slightly\nlonger commit message that I had added in v3. :))Thanks Amit, for your work!I am little bit reluctant to the change you made in v5. 
As per https://www.postgresql.org/docs/14/sql-altertable.html:> If CONCURRENTLY is specified, ... the second transaction acquires SHARE UPDATE EXCLUSIVE on the partitioned table and ACCESS EXCLUSIVE on the partition, and the detach process completes.In comment to find_all_inheritors():> The specified lock type is acquired on all child relations (but not on the given rel; caller should already have locked it) So I conclude that it is done in a right way in v3 with ACCESS_EXCLUSIVE lock.Also I'd recommend removing the link to a discussion from the test. Anyway we have link in a commit message.-- Report: https://postgr.es/m/OS3PR01MB5718DA1C4609A25186D1FBF194089%40OS3PR01MB5718.jpnprd01.prod.outlook.com--Best regards,Pavel BorisovPostgres Professional: http://postgrespro.com", "msg_date": "Fri, 15 Oct 2021 12:01:52 +0400", "msg_from": "Pavel Borisov <pashkin.elfe@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "Hi Pavel,\n\nOn Fri, Oct 15, 2021 at 5:02 PM Pavel Borisov <pashkin.elfe@gmail.com> wrote:\n>> The lock taken on the parent is either ShareUpdateExclusiveLock or\n>> AccessExclusiveLock depending on whether CONCURRENTLY is specified or\n>> not. Maybe that should be considered also when locking the children.\n>>\n>> I've updated the patch that way. (Also, reintroduced the slightly\n>> longer commit message that I had added in v3. :))\n>\n>\n> Thanks Amit, for your work!\n>\n> I am little bit reluctant to the change you made in v5. As per https://www.postgresql.org/docs/14/sql-altertable.html:\n>\n> > If CONCURRENTLY is specified, ... 
the second transaction acquires SHARE UPDATE EXCLUSIVE on the partitioned table and ACCESS EXCLUSIVE on the partition, and the detach process completes.\n>\n> In comment to find_all_inheritors():\n>\n> > The specified lock type is acquired on all child relations (but not on the given rel; caller should already have locked it)\n>\n> So I conclude that it is done in a right way in v3 with ACCESS_EXCLUSIVE lock.\n\nOops, you're right. I had failed to notice when reading the code that\nthe second transaction takes an AccessExclusiveLock on the target\npartition. Reverted back to how this was in v3.\n\n> Also I'd recommend removing the link to a discussion from the test. Anyway we have link in a commit message.\n> -- Report: https://postgr.es/m/OS3PR01MB5718DA1C4609A25186D1FBF194089%40OS3PR01MB5718.jpnprd01.prod.outlook.com\n\nYeah, maybe the link is unnecessary in the test comment, so removed.\nThough, I do occasionally see one of those in the test files (try `git\ngrep https src/test`).\n\nThanks again.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 18 Oct 2021 16:28:57 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": ">\n> Oops, you're right. I had failed to notice when reading the code that\n> the second transaction takes an AccessExclusiveLock on the target\n> partition. Reverted back to how this was in v3.\n>\n> > Also I'd recommend removing the link to a discussion from the test.\n> Anyway we have link in a commit message.\n> > -- Report:\n> https://postgr.es/m/OS3PR01MB5718DA1C4609A25186D1FBF194089%40OS3PR01MB5718.jpnprd01.prod.outlook.com\n>\n> Yeah, maybe the link is unnecessary in the test comment, so removed.\n> Though, I do occasionally see one of those in the test files (try `git\n> grep https src/test`).\n>\n\nThanks! I don't see problems anymore. 
The patch is RFC now.\n\n-- \nBest regards,\nPavel Borisov\n\nPostgres Professional: http://postgrespro.com <http://www.postgrespro.com>\n\nOops, you're right.  I had failed to notice when reading the code that\nthe second transaction takes an AccessExclusiveLock on the target\npartition.  Reverted back to how this was in v3.\n\n> Also I'd recommend removing the link to a discussion from the test. Anyway we have link in a commit message.\n> -- Report: https://postgr.es/m/OS3PR01MB5718DA1C4609A25186D1FBF194089%40OS3PR01MB5718.jpnprd01.prod.outlook.com\n\nYeah, maybe the link is unnecessary in the test comment, so removed.\nThough, I do occasionally see one of those in the test files (try `git\ngrep https src/test`).Thanks! I don't see problems anymore. The patch is RFC now.-- Best regards,Pavel BorisovPostgres Professional: http://postgrespro.com", "msg_date": "Mon, 18 Oct 2021 20:56:25 +0400", "msg_from": "Pavel Borisov <pashkin.elfe@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "On 2021-Oct-18, Pavel Borisov wrote:\n\n> > Yeah, maybe the link is unnecessary in the test comment, so removed.\n> > Though, I do occasionally see one of those in the test files (try `git\n> > grep https src/test`).\n\nYeah, for example the test stanza just above says \"test case for but\n16242\". Sadly there's no bug number to quote here. Anyway, an\ninterested reader will still be able to get to this thread via \"git\nblame\" when this get committed, which seems enough.\n\n> Thanks! I don't see problems anymore. The patch is RFC now.\n\nThanks. I'm looking at it now. 
I notice that if I take out the code\nfix and keep the tests, I only see the ATTACH side of the problem have a\nfailure; I expected to see a failure for DETACH too.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Mon, 18 Oct 2021 14:09:08 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "On 2021-Oct-18, Alvaro Herrera wrote:\n\n> Thanks. I'm looking at it now. I notice that if I take out the code\n> fix and keep the tests, I only see the ATTACH side of the problem have a\n> failure; I expected to see a failure for DETACH too.\n\nAh, no, the test covers both cases; it's just that if it fails the first\ntime, it'll fail to fail the second time. But if I run it separately,\nand make it succeed the first time, then the second one will fail as\nexpected. This becomes better visible by adding \\c in a few places, but\nI don't think it's necessary to add it to the committed test -- I'm\ntaking the code as Amit submitted.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\nMaybe there's lots of data loss but the records of data loss are also lost.\n(Lincoln Yeoh)\n\n\n", "msg_date": "Mon, 18 Oct 2021 14:40:31 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "Pushed now to all branches. Thanks much!\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"The important things in the world are problems with society that we don't\nunderstand at all. 
The machines will become more complicated but they won't\nbe more complicated than the societies that run them.\" (Freeman Dyson)\n\n\n", "msg_date": "Mon, 18 Oct 2021 19:14:49 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" }, { "msg_contents": "On Tue, Oct 19, 2021 at 7:14 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> Pushed now to all branches. Thanks much!\n\nThanks Álavro.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 19 Oct 2021 10:23:25 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Partition Check not updated when insert into a partition" } ]
[ { "msg_contents": "I noticed that while inserting directly into a partition table we\ncompute the PartitionCheckExpr by traversing all the parent partitions\nvia ExecPartitionCheck()->RelationGetPartitionQual()->generate_partition_qual().\nWe take AccessShareLock on parent tables while generating qual.\n\nNow, on the other hand, while dropping constraint on a partitioned\ntable, we take the lock from parent to all the child tables.\n\nI think taking locks in opposite directions can lead to deadlock in\nthese operations.\n\nI have tried with the below example on HEAD.\n\nSetup\n=======\ncreate or replace function func_dummy(price integer) returns integer as\n$$\n begin\n raise notice 'hello from func_dummy';\n return price;\n end;\n$$ language plpgsql immutable parallel unsafe;\n\n\nCREATE TABLE pt_test (a int, c char(1000)) PARTITION BY range (a);\nCREATE TABLE pt_test1 PARTITION OF pt_test FOR VALUES FROM (0) TO (100000);\nCREATE TABLE pt_test2 PARTITION OF pt_test FOR VALUES FROM (100000) TO (400000);\n\nALTER TABLE pt_test ADD CONSTRAINT check_cons CHECK(func_dummy(a) == a);\n\nActual test\n=============\nSession-1\n--------------\nAdd breakpoint in generate_partition_qual(). Perform below statement.\ninsert into pt_test2 values(100001, 'aaaa');\n\nNow, stop in the debugger just before taking AccessShareLock on the\nparent table.\n\nSession-2\n=========\n ALTER TABLE pt_test DROP CONSTRAINT check_cons;\n\nYou will see that session-2 is waiting to get a lock on pt_test2.\nThen, continue debugging in session-1 which will lead to a deadlock.\n\nIs this expected, if so why?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 23 Jun 2021 14:37:24 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": true, "msg_subject": "Deadlock risk while inserting directly into partition?" 
}, { "msg_contents": "On Wednesday, June 23, 2021 5:07 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\r\n> I noticed that while inserting directly into a partition table we compute the\r\n> PartitionCheckExpr by traversing all the parent partitions via\r\n> ExecPartitionCheck()->RelationGetPartitionQual()->generate_partition_qual().\r\n> We take AccessShareLock on parent tables while generating qual.\r\n> \r\n> Now, on the other hand, while dropping constraint on a partitioned table, we\r\n> take the lock from parent to all the child tables.\r\n> \r\n> I think taking locks in opposite directions can lead to deadlock in these\r\n> operations.\r\n> \r\n> I have tried with the below example on HEAD.\r\n> \r\n> Setup\r\n> =======\r\n> create or replace function func_dummy(price integer) returns integer as $$\r\n> begin\r\n> raise notice 'hello from func_dummy';\r\n> return price;\r\n> end;\r\n> $$ language plpgsql immutable parallel unsafe;\r\n> \r\n> \r\n> CREATE TABLE pt_test (a int, c char(1000)) PARTITION BY range (a); CREATE\r\n> TABLE pt_test1 PARTITION OF pt_test FOR VALUES FROM (0) TO (100000);\r\n> CREATE TABLE pt_test2 PARTITION OF pt_test FOR VALUES FROM (100000) TO\r\n> (400000);\r\n> \r\n> ALTER TABLE pt_test ADD CONSTRAINT check_cons CHECK(func_dummy(a)\r\n> == a);\r\n> \r\n> Actual test\r\n> =============\r\n> Session-1\r\n> --------------\r\n> Add breakpoint in generate_partition_qual(). 
Perform below statement.\r\n> insert into pt_test2 values(100001, 'aaaa');\r\n> \r\n> Now, stop in the debugger just before taking AccessShareLock on the parent\r\n> table.\r\n> \r\n> Session-2\r\n> =========\r\n> ALTER TABLE pt_test DROP CONSTRAINT check_cons;\r\n> \r\n> You will see that session-2 is waiting to get a lock on pt_test2.\r\n> Then, continue debugging in session-1 which will lead to a deadlock.\r\n\r\nI can reproduce this dead lock issue with the above steps.\r\nAnd I can see the following error message.\r\n\r\npostgres=# insert into pt_test2 values(100001, 'aaaa');\r\nNOTICE: hello from func_dummy\r\nERROR: deadlock detected\r\nDETAIL: Process 3068763 waits for AccessShareLock on relation 16385 of database 13027; blocked by process 3068966.\r\nProcess 3068966 waits for AccessExclusiveLock on relation 16393 of database 13027; blocked by process 3068763.\r\nHINT: See server log for query details.\r\n\r\nBest regards,\r\nhouzj\r\n", "msg_date": "Wed, 23 Jun 2021 09:26:36 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Wed, 23 Jun 2021 at 21:07, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> I noticed that while inserting directly into a partition table we\n> compute the PartitionCheckExpr by traversing all the parent partitions\n> via ExecPartitionCheck()->RelationGetPartitionQual()->generate_partition_qual().\n> We take AccessShareLock on parent tables while generating qual.\n>\n> Now, on the other hand, while dropping constraint on a partitioned\n> table, we take the lock from parent to all the child tables.\n>\n> I think taking locks in opposite directions can lead to deadlock in\n> these operations.\n\nI wonder if it's possible to do any better here? 
Surely when\ntraversing from child to parent we must lock the child before checking\nwhat the parent relation is.\n\nI think the reasons for doing operations directly on partitions are\nbeing reduced with each release. What operations do people really\nneed to do on partitions now? TRUNCATE is probably one, maybe there's\nstill a need to CREATE INDEX. There's not much to gain performance\nwise now inserting directly into a partition. There's a pending patch\naround that aims to speed that up further by caching the last used\npartition and trying that first.\n\nI've recently been thinking it would be good if you were unable to\naccess partitions directly by name at all. That would also get around\nthe problem of having to lock all non-pruned partitions during queries\nto the partitioned table. Maybe it's too late for that though.\n\nDavid\n\n\n", "msg_date": "Thu, 24 Jun 2021 10:27:06 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "David Rowley <dgrowleyml@gmail.com> writes:\n> I've recently been thinking it would be good if you were unable to\n> access partitions directly by name at all.\n\nI strongly disagree. That's essentially betting the whole farm on\nour always being able to optimize parent-level operations fully,\nwhich I do not think we are anywhere close to.\n\n> That would also get around\n> the problem of having to lock all non-pruned partitions during queries\n> to the partitioned table. Maybe it's too late for that though.\n\nYeah, I think we are locked into the current design now, for better\nor worse.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 23 Jun 2021 18:38:51 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" 
}, { "msg_contents": "On Thu, 24 Jun 2021 at 10:38, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> David Rowley <dgrowleyml@gmail.com> writes:\n> > I've recently been thinking it would be good if you were unable to\n> > access partitions directly by name at all.\n>\n> I strongly disagree. That's essentially betting the whole farm on\n> our always being able to optimize parent-level operations fully,\n> which I do not think we are anywhere close to.\n\nDid you have anything in particular in mind here? I thought we got\nall these in 8edd0e794. I think the one that was missing was parallel\nindex scans. That commit adds code to add the missing paths in\nadd_paths_to_append_rel().\n\nAs of 14, UPDATE/DELETEs when a single partition remains after pruning\nshould be fairly comparable to a direct UPDATE/DELETE on the\npartition.\n\nCertainly, back when partitioning was added there were still lots of\nuse cases for querying partitions directly, but as far as I see it,\nthere's not many of those left. The patch in [1] aims to reduce the\noverhead of one of these. I have a patch locally for another one. I'm\ncurrently not aware of any other cases where querying a single\npartition is slow.\n\nBut... maybe there are some cases where a user can be certain that all\ninteresting records are contained in a single partition but\npartitioning pruning cannot prove it...So maybe what you say is right.\nThe workaround there would be to add a qual that allows pruning to\nwork.\n\nDavid\n\n[1] https://www.postgresql.org/message-id/CA+HiwqGqh-aHXGO8-_ftU7e2GdGUr_T-xqr6Z_6uagyJpEpJfA@mail.gmail.com\n\n\n", "msg_date": "Thu, 24 Jun 2021 12:07:04 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "David Rowley <dgrowleyml@gmail.com> writes:\n> On Thu, 24 Jun 2021 at 10:38, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> I strongly disagree. 
That's essentially betting the whole farm on\n>> our always being able to optimize parent-level operations fully,\n>> which I do not think we are anywhere close to.\n\n> Did you have anything in particular in mind here?\n\nI don't think it's very hard to make up WHERE conditions that a person can\nsee select only one partition, but PG won't be able to figure that out.\n\n> But... maybe there are some cases where a user can be certain that all\n> interesting records are contained in a single partition but\n> partitioning pruning cannot prove it...So maybe what you say is right.\n> The workaround there would be to add a qual that allows pruning to\n> work.\n\n[ shrug... ] It's about as easy to just name the partition you want.\nWhen planning overhead is considered, maybe it's a lot easier.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 23 Jun 2021 20:14:24 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Thu, 24 Jun 2021 at 12:14, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> David Rowley <dgrowleyml@gmail.com> writes:\n> > But... maybe there are some cases where a user can be certain that all\n> > interesting records are contained in a single partition but\n> > partitioning pruning cannot prove it...So maybe what you say is right.\n> > The workaround there would be to add a qual that allows pruning to\n> > work.\n>\n> [ shrug... ] It's about as easy to just name the partition you want.\n> When planning overhead is considered, maybe it's a lot easier.\n\nI'm not suggesting that we go and make it impossible for users to\ndirectly reference partitions today. What I mean is that as we add\nmore and more fixes to improve performance of partitioning, that there\ncomes a point where the ability to directly reference partitions is a\nhindrance rather than something that's useful. 
Right now that\nhindrance is the fact that we must lock every single partition in the\nplan. We only need to do that in case some other backend is doing\nsomething that bypasses taking a lock on the parent partitioned table.\nThe overhead of taking these locks is pretty significant for\npartitioned tables with lots of partitions where only 1 of them\nsurvives run-time partition pruning. That's really terrible for\npeople that want to PREPARE queries and just look up a single row from\na single partition. That seems like a pretty big use case that we're\njust terrible at today.\n\nDavid\n\n\n", "msg_date": "Thu, 24 Jun 2021 12:32:33 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "David Rowley <dgrowleyml@gmail.com> writes:\n> ... What I mean is that as we add\n> more and more fixes to improve performance of partitioning, that there\n> comes a point where the ability to directly reference partitions is a\n> hindrance rather than something that's useful. Right now that\n> hindrance is the fact that we must lock every single partition in the\n> plan. We only need to do that in case some other backend is doing\n> something that bypasses taking a lock on the parent partitioned table.\n\nTBH, I buy no part of that line of reasoning. I don't think that the\nability to access partitions directly is a material problem here;\nI doubt that we need to lock every partition in the plan when run-time\nrouting is working (surely we only need to lock the partition mapping);\nand most especially I don't see why an operation on a child table that\ndoesn't lock the parent would cause a problem for queries that do not\nneed to access that child. 
Perhaps we've got some implementation issues\nto fix, but I see no fundamental problem there.\n\nIt is true that this design can lead to deadlocks between operations that\nstart from the parent vs ones that start from the child and then discover\nthat they need to lock the parent. But the latter should be darn rare.\nIn any case, your solution seems to amount to prohibiting not only the\nlatter class of operations altogether, but *also* prohibiting operations\non the child that don't need to lock the parent. I fail to see how that\nmakes anybody's life better.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 23 Jun 2021 20:45:40 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Thu, 24 Jun 2021 at 12:45, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> I don't think that the\n> ability to access partitions directly is a material problem here;\n> I doubt that we need to lock every partition in the plan when run-time\n> routing is working (surely we only need to lock the partition mapping);\n> and most especially I don't see why an operation on a child table that\n> doesn't lock the parent would cause a problem for queries that do not\n> need to access that child. Perhaps we've got some implementation issues\n> to fix, but I see no fundamental problem there.\n\nNot quite sure I know what you mean by \"lock the partition mapping\".\n\nWe do unfortunately need to lock all partitions in the plan before\nrun-time pruning completes. For example, if someone drops an index\nfrom one of the partitions that's used in the plan, then we must take\nthe lock before execution so that we properly invalidate the plan and\nget another one. 
I'm not sure I see how that could be done during\nexecution, We might have already started returning rows to the client\nby that time.\n\n> It is true that this design can lead to deadlocks between operations that\n> start from the parent vs ones that start from the child and then discover\n> that they need to lock the parent. But the latter should be darn rare.\n> In any case, your solution seems to amount to prohibiting not only the\n> latter class of operations altogether, but *also* prohibiting operations\n> on the child that don't need to lock the parent.\n\nAgain, I'm not saying we need to go and make partitioning work this\nway. I'm saying that the problem wouldn't exist if it did work that\nway and that there appears to be no solution to fix it without making\nit work that way.\n\n> I fail to see how that\n> makes anybody's life better.\n\nWell, if you ignore the perfectly valid use case that I mentioned\nthen, yeah. Or do you not think that doing a single-row lookup on a\npartitioned table with a prepared query is a case worth worrying\nabout?\n\nI grabbed a profile from a generic plan being executed on a\npartitioned table with 100 partitions. It's completely dominated by\nlock management and looks like this:\n\n 22.42% postgres postgres [.] hash_search_with_hash_value\n 9.06% postgres postgres [.] hash_bytes\n 4.14% postgres postgres [.] LockAcquireExtended\n 3.90% postgres postgres [.] AllocSetAlloc\n 3.84% postgres postgres [.] hash_seq_search\n 3.77% postgres postgres [.] LockReleaseAll\n\nI don't think 100 partitions is excessive.\n\nDavid\n\n\n", "msg_date": "Thu, 24 Jun 2021 13:13:55 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Thu, Jun 24, 2021 at 1:45 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> David Rowley <dgrowleyml@gmail.com> writes:\n> > ... 
What I mean is that as we add\n> > more and more fixes to improve performance of partitioning, that there\n> > comes a point where the ability to directly reference partitions is a\n> > hindrance rather than something that's useful. Right now that\n> > hindrance is the fact that we must lock every single partition in the\n> > plan. We only need to do that in case some other backend is doing\n> > something that bypasses taking a lock on the parent partitioned table.\n>\n> TBH, I buy no part of that line of reasoning. I don't think that the\n> ability to access partitions directly is a material problem here;\n> I doubt that we need to lock every partition in the plan when run-time\n> routing is working (surely we only need to lock the partition mapping);\n> and most especially I don't see why an operation on a child table that\n> doesn't lock the parent would cause a problem for queries that do not\n> need to access that child. Perhaps we've got some implementation issues\n> to fix, but I see no fundamental problem there.\n>\n> It is true that this design can lead to deadlocks between operations that\n> start from the parent vs ones that start from the child and then discover\n> that they need to lock the parent. But the latter should be darn rare.\n> In any case, your solution seems to amount to prohibiting not only the\n> latter class of operations altogether, but *also* prohibiting operations\n> on the child that don't need to lock the parent. I fail to see how that\n> makes anybody's life better.\n\nI agree with David's points above.\n\nMaybe I've missed something but I don't see any benefit in being able\nto reference individual partitions by name, as a feature. Maybe as a\ntemporary performance trick, but app devs just want partitioning to be\ninvisible to them at the application level. 
It's a modularity\nviolation to be able to access parts of a table, just like it would be\nif we allowed people to reference individual smgr files.\n\nIf that requires that we add a new non-default option, no problem.\nMost people will want to use that option, AFAICS.\n\n-- \nSimon Riggs http://www.EnterpriseDB.com/\n\n\n", "msg_date": "Thu, 24 Jun 2021 15:26:23 +0100", "msg_from": "Simon Riggs <simon.riggs@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Thu, 24 Jun 2021 at 12:32, David Rowley <dgrowleyml@gmail.com> wrote:\n> The overhead of taking these locks is pretty significant for\n> partitioned tables with lots of partitions where only 1 of them\n> survives run-time partition pruning. That's really terrible for\n> people that want to PREPARE queries and just look up a single row from\n> a single partition. That seems like a pretty big use case that we're\n> just terrible at today.\n\nI wonder, since we can't delay taking locks until after run-time\npruning due to being unable to invalidate cached plans, maybe instead\nwe could tag on any PartitionPruneInfo onto the PlannedStmt itself and\ndo the init plan run-time prune run during AcquireExecutorLocks().\n\nA lock would need to be taken on each partitioned table before we\nprune for it. So if there was multi-level partitioning, we'd need to\nlock the partitioned table, do pruning for that partitioned table,\nthen lock any sub-partitioned tables before doing pruning on those.\n\nI don't immediately see why it couldn't be made to work, it's just\nthat it adds quite a lot of complexity to what's being done in\nAcquireExecutorLocks(), which today is a very simple function.\n\nDavid\n\n\n", "msg_date": "Fri, 25 Jun 2021 13:26:08 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" 
}, { "msg_contents": "On Thu, Jun 24, 2021 at 10:27:06AM +1200, David Rowley wrote:\n> I think the reasons for doing operations directly on partitions are\n> being reduced with each release. What operations do people really\n> need to do on partitions now? TRUNCATE is probably one, maybe there's\n> still a need to CREATE INDEX.\n\nWe always SELECT out of parent tables, but need to be able to CREATE INDEX on\npartitions.\n\nAnd INSERT ON CONFLICT into partitions, as we don't have nor want partitioned\nindexes, for several reasons. Same for row triggers. One reason is that we\nstill support inheritence tables, and it's better if we can deal with both\ntypes of child tables the same way. That neither DETACH nor NO INHERIT grammar\nsupports both is arguably a wart, as it requires our library to check the\nrelkind. Another reason is that our unique indexes are large - they're across\nmultiple columns, sometimes text columns, and we don't need them except to\nsupport upsert, so they're pruned when the table is no longer \"recent\".\n\nPartitions have to be manually created and dropped, so applications already\nhave to deal with partitions, and it's not surprising if they interact with\nthem in other ways, too. Partitions can themselves be partitioned, which also\nneed to be accessed directly.\n\n-- \nJustin\n\n\n", "msg_date": "Fri, 25 Jun 2021 23:41:40 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Fri, Jun 25, 2021 at 10:26 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> On Thu, 24 Jun 2021 at 12:32, David Rowley <dgrowleyml@gmail.com> wrote:\n> > The overhead of taking these locks is pretty significant for\n> > partitioned tables with lots of partitions where only 1 of them\n> > survives run-time partition pruning. 
That's really terrible for\n> > people that want to PREPARE queries and just look up a single row from\n> > a single partition. That seems like a pretty big use case that we're\n> > just terrible at today.\n>\n> I wonder, since we can't delay taking locks until after run-time\n> pruning due to being unable to invalidate cached plans, maybe instead\n> we could tag on any PartitionPruneInfo onto the PlannedStmt itself and\n> do the init plan run-time prune run during AcquireExecutorLocks().\n\nThis is exactly what I was mulling doing when working on [1] some last\nyear, after an off-list discussion with Robert (he suggested the idea\nIIRC), though I never quite finished writing a patch. I have planned\nto revisit this topic (\"locking overhead in generic plans\") for v15,\nnow that we have *some* proposals mentioned in [1] committed to v14,\nso can look into this.\n\n> A lock would need to be taken on each partitioned table before we\n> prune for it. So if there was multi-level partitioning, we'd need to\n> lock the partitioned table, do pruning for that partitioned table,\n> then lock any sub-partitioned tables before doing pruning on those.\n>\n> I don't immediately see why it couldn't be made to work, it's just\n> that it adds quite a lot of complexity to what's being done in\n> AcquireExecutorLocks(), which today is a very simple function.\n\nYeah, AcquireExecutorLocks()'s current method of finding the set of\nrelations to lock is very simple -- just scan the range table\n(PlannedStmt.rtable). 
If we're to remove prunable leaf partitions\nfrom that set, maybe we'd have to find a way to remove them from\nPlannedStmt.rtable as part of running the \"init\" pruning, which we'd\nhave to do anyway, because perhaps the executor proper (mainly\nInitPlan) should also see the shrunken version of the range table.\nNot to mention the complexity of getting the \"init\" pruning itself to\nrun outside a full-blown executor context.\n\nAnyway, do you agree with starting a thread to discuss possible\napproaches to attack this?\n\n--\nAmit Langote\nEDB: http://www.enterprisedb.com\n\n[1] https://www.postgresql.org/message-id/CA+HiwqG7ZruBmmih3wPsBZ4s0H2EhywrnXEduckY5Hr3fWzPWA@mail.gmail.com\n\n\n", "msg_date": "Mon, 28 Jun 2021 12:58:51 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Thu, Jun 24, 2021 at 7:27 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> On Wed, 23 Jun 2021 at 21:07, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > I noticed that while inserting directly into a partition table we\n> > compute the PartitionCheckExpr by traversing all the parent partitions\n> > via ExecPartitionCheck()->RelationGetPartitionQual()->generate_partition_qual().\n> > We take AccessShareLock on parent tables while generating qual.\n> >\n> > Now, on the other hand, while dropping constraint on a partitioned\n> > table, we take the lock from parent to all the child tables.\n> >\n> > I think taking locks in opposite directions can lead to deadlock in\n> > these operations.\n>\n> I wonder if it's possible to do any better here? Surely when\n> traversing from child to parent we must lock the child before checking\n> what the parent relation is.\n\nI remember there was a discussion where I proposed to document the\ndeadlock hazard that exists when performing DML directly on\npartitions. 
The proposal didn't get enough attention, perhaps because\nit was in the middle of a long reply about other concerns:\n\nhttps://www.postgresql.org/message-id/16db1458-67cf-4add-736e-31b053115e8e%40lab.ntt.co.jp\n\nMaybe a good idea to add a line or 2 in 5.11. Table Partitioning?\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 28 Jun 2021 13:20:33 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Mon, Jun 28, 2021 at 12:58 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Fri, Jun 25, 2021 at 10:26 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> > On Thu, 24 Jun 2021 at 12:32, David Rowley <dgrowleyml@gmail.com> wrote:\n> > > The overhead of taking these locks is pretty significant for\n> > > partitioned tables with lots of partitions where only 1 of them\n> > > survives run-time partition pruning. That's really terrible for\n> > > people that want to PREPARE queries and just look up a single row from\n> > > a single partition. 
That seems like a pretty big use case that we're\n> > > just terrible at today.\n> >\n> > I wonder, since we can't delay taking locks until after run-time\n> > pruning due to being unable to invalidate cached plans, maybe instead\n> > we could tag on any PartitionPruneInfo onto the PlannedStmt itself and\n> > do the init plan run-time prune run during AcquireExecutorLocks().\n>\n> This is exactly what I was mulling doing when working on [1] some last\n> year, after an off-list discussion with Robert (he suggested the idea\n> IIRC), though I never quite finished writing a patch.\n\nAh, I *had* mentioned this bit in the first email of [1]:\n\n\"Another solution suggested to me by Robert Haas in an off-list\ndiscussion is to teach AcquireExecutorLocks() or the nearby code to\nperform EXTERN parameter based pruning before passing the plan tree to\nthe executor and lock partitions that survive that pruning. It's\nperhaps doable if we refactor the ExecFindInitialMatchingSubPlans() to\nnot require a full-blown execution context. Or maybe we could do\nsomething more invasive by rewriting AcquireExecutorLocks() to walk\nthe plan tree instead of the flat range table, looking for scan nodes\nand nodes that support runtime pruning to lock the appropriate\nrelations.\"\n\nAlas, I hadn't written down any concrete proposals as to how that\ncould be done. :(\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 28 Jun 2021 15:14:10 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" 
}, { "msg_contents": "On Mon, Jun 28, 2021 at 9:50 AM Amit Langote <amitlangote09@gmail.com> wrote:\n>\n> On Thu, Jun 24, 2021 at 7:27 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> > On Wed, 23 Jun 2021 at 21:07, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > > I noticed that while inserting directly into a partition table we\n> > > compute the PartitionCheckExpr by traversing all the parent partitions\n> > > via ExecPartitionCheck()->RelationGetPartitionQual()->generate_partition_qual().\n> > > We take AccessShareLock on parent tables while generating qual.\n> > >\n> > > Now, on the other hand, while dropping constraint on a partitioned\n> > > table, we take the lock from parent to all the child tables.\n> > >\n> > > I think taking locks in opposite directions can lead to deadlock in\n> > > these operations.\n> >\n> > I wonder if it's possible to do any better here? Surely when\n> > traversing from child to parent we must lock the child before checking\n> > what the parent relation is.\n>\n> I remember there was a discussion where I proposed to document the\n> deadlock hazard that exists when performing DML directly on\n> partitions.\n>\n\n+1. I think it is better if we can also write in code comments or\nREADME about this. How about adding something to README/code about\nlocking of partitions for different operations? Unless I am missing\nit, I think some of this information is there in bits and pieces but\nit would be great if we can have it consolidated at someplace.\n\n> The proposal didn't get enough attention, perhaps because\n> it was in the middle of a long reply about other concerns:\n>\n> https://www.postgresql.org/message-id/16db1458-67cf-4add-736e-31b053115e8e%40lab.ntt.co.jp\n>\n> Maybe a good idea to add a line or 2 in 5.11. 
Table Partitioning?\n>\n\nSounds reasonable, but I think it would be better if can mention the\nscenarios/cases where there is a possibility of deadlocks.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 28 Jun 2021 11:44:27 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Sat, 26 Jun 2021 at 16:41, Justin Pryzby <pryzby@telsasoft.com> wrote:\n>\n> On Thu, Jun 24, 2021 at 10:27:06AM +1200, David Rowley wrote:\n> > I think the reasons for doing operations directly on partitions are\n> > being reduced with each release. What operations do people really\n> > need to do on partitions now? TRUNCATE is probably one, maybe there's\n> > still a need to CREATE INDEX.\n>\n> We always SELECT out of parent tables, but need to be able to CREATE INDEX on\n> partitions.\n\nI imagined we'd have something along the lines of: ALTER TABLE\npartitioned_table ALTER PARTITION part CREATE INDEX. I admit I don't\nknow how that would look when faced with multi-level partitioning.\n\n> And INSERT ON CONFLICT into partitions,\n\nI didn't think of that one. Looks like we're further away from\npartitioning being transparent to queries and DML than I thought :-(\n\nDavid\n\n\n", "msg_date": "Mon, 28 Jun 2021 23:46:30 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" 
}, { "msg_contents": "On Mon, 28 Jun 2021 at 15:59, Amit Langote <amitlangote09@gmail.com> wrote:\n>\n> On Fri, Jun 25, 2021 at 10:26 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> > I wonder, since we can't delay taking locks until after run-time\n> > pruning due to being unable to invalidate cached plans, maybe instead\n> > we could tag on any PartitionPruneInfo onto the PlannedStmt itself and\n> > do the init plan run-time prune run during AcquireExecutorLocks().\n>\n> This is exactly what I was mulling doing when working on [1] some last\n> year, after an off-list discussion with Robert (he suggested the idea\n> IIRC), though I never quite finished writing a patch. I have planned\n> to revisit this topic (\"locking overhead in generic plans\") for v15,\n> now that we have *some* proposals mentioned in [1] committed to v14,\n> so can look into this.\n\nI thought about this only a little bit more from when I wrote the\nabove. I think it would require adding yet another stage of when we\ndo run-time pruning. It should be possible to do pruning when there's\nGeneratePruningStepsContext.has_exec_param == true. However, I'm not\nso sure that we could do GeneratePruningStepsContext.has_mutable_arg.\nEvaluating the value for those requires some level of actual\nexecution. That's a pity as we'd still need to take a bunch of extra\nlocks in a case like: SELECT * FROM time_parted WHERE ts >= NOW() -\nINTERVAL '1 hour';\n\nI see the param values are fairly easily accessible a couple of levels\nup from AcquireExecutorLocks() in GetCachedPlan().\n\n> Anyway, do you agree with starting a thread to discuss possible\n> approaches to attack this?\n\nAgreed about the separate thread. 
We can discuss it further there.\n\nDavid\n\n> [1] https://www.postgresql.org/message-id/CA+HiwqG7ZruBmmih3wPsBZ4s0H2EhywrnXEduckY5Hr3fWzPWA@mail.gmail.com\n\n\n", "msg_date": "Tue, 29 Jun 2021 00:08:31 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" }, { "msg_contents": "On Mon, 2021-06-28 at 23:46 +1200, David Rowley wrote:\n> On Sat, 26 Jun 2021 at 16:41, Justin Pryzby <pryzby@telsasoft.com> wrote:\n> > On Thu, Jun 24, 2021 at 10:27:06AM +1200, David Rowley wrote:\n> > > I think the reasons for doing operations directly on partitions are\n> > > being reduced with each release. What operations do people really\n> > > need to do on partitions now? TRUNCATE is probably one, maybe there's\n> > > still a need to CREATE INDEX.\n> > \n> > We always SELECT out of parent tables, but need to be able to CREATE INDEX on\n> > partitions.\n> \n> I imagined we'd have something along the lines of: ALTER TABLE\n> partitioned_table ALTER PARTITION part CREATE INDEX. 
I admit I don't\n> know how that would look when faced with multi-level partitioning.\n\nSome general comments on this initiative:\n\nI like it that partitions are normal tables in PostgreSQL, and that I\ncan just use them in SQL statements.\n\nIf there is really no other way to avoid certain problems, we can change\nthat, but I would greatly prefer if it remain the way it is now.\nPerhaps we can document such deadlock risks, or we can find a ways to\navoid them.\n\nI think reducing functionality should be the last route to consider.\nIf we introduce new syntax to access partitions, we will end up with a lot\nof new syntax, and we might well have an endless stream of requests for\nways to do X with a partition.\n\nYours,\nLaurenz Albe\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 15:39:44 +0200", "msg_from": "Laurenz Albe <laurenz.albe@cybertec.at>", "msg_from_op": false, "msg_subject": "Re: Deadlock risk while inserting directly into partition?" } ]
[ { "msg_contents": "genbki.pl says:\n\n# Perform OID lookups on an array of OID names.\n# If we don't have a unique value to substitute, warn and\n# leave the entry unchanged.\n# (A warning seems sufficient because the bootstrap backend will reject\n# non-numeric values anyway. So we might as well detect multiple problems\n # within this genbki.pl run.)\n\nThis is fine, but I have found this to be a bit cumbersome in practice \nsometimes, because errors are then not easily seen at build time but \nhave to be extracted from some log files during test runs.\n\nI propose the attached patch to make genbki.pl error out if it \nencounters any errors in this routine, while preserving the property \nthat all errors in one run are reported.", "msg_date": "Wed, 23 Jun 2021 11:26:50 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": true, "msg_subject": "genbki stricter error handling" }, { "msg_contents": "Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n> I propose the attached patch to make genbki.pl error out if it \n> encounters any errors in this routine, while preserving the property \n> that all errors in one run are reported.\n\n+1, looks sane in a quick read-through.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 23 Jun 2021 09:27:30 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: genbki stricter error handling" } ]
[ { "msg_contents": "Hi,\n\nIvan Frolkov reported a problem with choosing a non-optimal index during \na query optimization. This problem appeared after building of an \nextended statistics.\n\nI prepared the test case (see t.sql in attachment).\nFor reproduction of this case we need to have a composite primary key \nindex and one another index.\nBefore creation of extended statistics, SELECT from the table choose PK \nindex and returns only one row. But after, this SELECT picks alternative \nindex, fetches and filters many tuples.\n\nThe problem is related to a corner case in btree cost estimation procedure:\nif postgres detects unique one-row index scan, it sets\nnumIndexTuples to 1.0.\n\nBut the selectivity is calculated as usual, by the \nclauselist_selectivity() routine and can have a value, much more than \ncorresponding to single tuple. This selectivity value is used later in \nthe code to calculate a number of fetched tuples and can lead to \nchoosing of an suboptimal index.\n\nThe attached patch is my suggestion to fix this problem.\n\n-- \nregards,\nAndrey Lepikhov\nPostgres Professional", "msg_date": "Wed, 23 Jun 2021 19:19:26 +0500", "msg_from": "\"Andrey V. Lepikhov\" <a.lepikhov@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "On Wed, Jun 23, 2021 at 7:19 AM Andrey V. Lepikhov\n<a.lepikhov@postgrespro.ru> wrote:\n> Ivan Frolkov reported a problem with choosing a non-optimal index during\n> a query optimization. 
This problem appeared after building of an\n> extended statistics.\n\nAny thoughts on this, Tomas?\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Tue, 10 Aug 2021 17:48:19 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "On 8/11/21 2:48 AM, Peter Geoghegan wrote:\n> On Wed, Jun 23, 2021 at 7:19 AM Andrey V. Lepikhov\n> <a.lepikhov@postgrespro.ru> wrote:\n>> Ivan Frolkov reported a problem with choosing a non-optimal index during\n>> a query optimization. This problem appeared after building of an\n>> extended statistics.\n> \n> Any thoughts on this, Tomas?\n> \n\nThanks for reminding me, I missed / forgot about this thread.\n\nI agree the current behavior is unfortunate, but I'm not convinced the \nproposed patch is fixing the right place - doesn't this mean the index \ncosting won't match the row estimates displayed by EXPLAIN?\n\nI wonder if we should teach clauselist_selectivity about UNIQUE indexes, \nand improve the cardinality estimates directly, not just costing for \nindex scans.\n\nAlso, is it correct that the patch calculates num_sa_scans only when \n(numIndexTuples >= 0.0)?\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Thu, 12 Aug 2021 01:26:29 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "On 8/12/21 4:26 AM, Tomas Vondra wrote:\n> On 8/11/21 2:48 AM, Peter Geoghegan wrote:\n>> On Wed, Jun 23, 2021 at 7:19 AM Andrey V. Lepikhov\n>> <a.lepikhov@postgrespro.ru> wrote:\n>>> Ivan Frolkov reported a problem with choosing a non-optimal index during\n>>> a query optimization. 
This problem appeared after building of an\n>>> extended statistics.\n>>\n>> Any thoughts on this, Tomas?\n>>\n> \n> Thanks for reminding me, I missed / forgot about this thread.\n> \n> I agree the current behavior is unfortunate, but I'm not convinced the \n> proposed patch is fixing the right place - doesn't this mean the index \n> costing won't match the row estimates displayed by EXPLAIN?\nI think, it is not a problem. In EXPLAIN you will see only 1 row \nwith/without this patch.\n> \n> I wonder if we should teach clauselist_selectivity about UNIQUE indexes, \n> and improve the cardinality estimates directly, not just costing for \n> index scans.\nThis idea looks better. I will try to implement it.\n> \n> Also, is it correct that the patch calculates num_sa_scans only when \n> (numIndexTuples >= 0.0)?\nThanks, fixed.\n\n-- \nregards,\nAndrey Lepikhov\nPostgres Professional", "msg_date": "Fri, 13 Aug 2021 12:01:24 +0500", "msg_from": "\"Andrey V. Lepikhov\" <a.lepikhov@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "On 12/8/21 04:26, Tomas Vondra wrote:\n> On 8/11/21 2:48 AM, Peter Geoghegan wrote:\n>> On Wed, Jun 23, 2021 at 7:19 AM Andrey V. Lepikhov\n> I agree the current behavior is unfortunate, but I'm not convinced the \n> proposed patch is fixing the right place - doesn't this mean the index \n> costing won't match the row estimates displayed by EXPLAIN?\nI rewrote the patch. It's now simpler and shorter. May be more convenient.\nAlso, it includes a regression test to detect the problem in future.\n> \n> I wonder if we should teach clauselist_selectivity about UNIQUE indexes, \n> and improve the cardinality estimates directly, not just costing for \n> index scans.\nI tried to implement this in different ways. 
But it causes additional \noverhead and code complexity - analyzing a list of indexes and match \nclauses of each index with input clauses in each selectivity estimation.\nI don't like that way and propose a new patch in attachment.\n\n-- \nregards,\nAndrey Lepikhov\nPostgres Professional", "msg_date": "Mon, 30 Aug 2021 11:44:28 +0500", "msg_from": "Andrey Lepikhov <a.lepikhov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "Andrey Lepikhov <a.lepikhov@postgrespro.ru> writes:\n> On 12/8/21 04:26, Tomas Vondra wrote:\n>> I wonder if we should teach clauselist_selectivity about UNIQUE indexes, \n>> and improve the cardinality estimates directly, not just costing for \n>> index scans.\n\n> I tried to implement this in different ways. But it causes additional \n> overhead and code complexity - analyzing a list of indexes and match \n> clauses of each index with input clauses in each selectivity estimation.\n> I don't like that way and propose a new patch in attachment.\n\nI looked at this briefly. I do not think that messing with\nbtcostestimate/genericcostestimate is the right response at all.\nThe problem can be demonstrated with no index whatever, as in the\nattached shortened version of the original example. I get\n\n QUERY PLAN \n---------------------------------------------------\n Seq Scan on a (cost=0.00..46.02 rows=1 width=12)\n Filter: ((x = 1) AND (y = 1) AND (z = 1))\n(2 rows)\n\nbefore adding the extended stats, and\n\n QUERY PLAN \n----------------------------------------------------\n Seq Scan on a (cost=0.00..46.02 rows=28 width=12)\n Filter: ((x = 1) AND (y = 1) AND (z = 1))\n(2 rows)\n\nafterwards. So the extended stats have made the rowcount\nestimate significantly worse, which seems like an indicator of a\nbug somewhere in extended stats. 
The more so because I can crank\ndefault_statistics_target all the way to 10000 without these\nestimates changing. If we can't get a dead-on estimate for a\n2001-row table at that stats level, we're doing something wrong,\nsurely?\n\nAlso, I found that if I ask only for ndistinct stats,\nI still get rows=1. The fishiness seems to be directly\na problem with dependencies stats.\n\n\t\t\tregards, tom lane", "msg_date": "Thu, 07 Jul 2022 18:07:36 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "On 7/8/22 03:07, Tom Lane wrote:\n> Andrey Lepikhov <a.lepikhov@postgrespro.ru> writes:\n>> On 12/8/21 04:26, Tomas Vondra wrote:\n>>> I wonder if we should teach clauselist_selectivity about UNIQUE indexes,\n>>> and improve the cardinality estimates directly, not just costing for\n>>> index scans.\n> \n>> I tried to implement this in different ways. But it causes additional\n>> overhead and code complexity - analyzing a list of indexes and match\n>> clauses of each index with input clauses in each selectivity estimation.\n>> I don't like that way and propose a new patch in attachment.\n> \n> I looked at this briefly. I do not think that messing with\n> btcostestimate/genericcostestimate is the right response at all.\n> The problem can be demonstrated with no index whatever, as in the\n> attached shortened version of the original example. I get\n\nI partly agree with you. Yes, I see the problem too. 
But also we have a \nproblem that I described above: optimizer don't choose a path with \nminimal selectivity from a set selectivities which shows cardinality \nless than 1 (see badestimate2.sql).\nNew patch (see in attachment), fixes this problem.\n\n-- \nRegards\nAndrey Lepikhov\nPostgres Professional", "msg_date": "Mon, 11 Jul 2022 12:57:36 +0500", "msg_from": "Andrey Lepikhov <a.lepikhov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "Hi,\n\nOn 2022-07-11 12:57:36 +0500, Andrey Lepikhov wrote:\n> On 7/8/22 03:07, Tom Lane wrote:\n> > Andrey Lepikhov <a.lepikhov@postgrespro.ru> writes:\n> > > On 12/8/21 04:26, Tomas Vondra wrote:\n> > > > I wonder if we should teach clauselist_selectivity about UNIQUE indexes,\n> > > > and improve the cardinality estimates directly, not just costing for\n> > > > index scans.\n> > \n> > > I tried to implement this in different ways. But it causes additional\n> > > overhead and code complexity - analyzing a list of indexes and match\n> > > clauses of each index with input clauses in each selectivity estimation.\n> > > I don't like that way and propose a new patch in attachment.\n> > \n> > I looked at this briefly. I do not think that messing with\n> > btcostestimate/genericcostestimate is the right response at all.\n> > The problem can be demonstrated with no index whatever, as in the\n> > attached shortened version of the original example. I get\n> \n> I partly agree with you. Yes, I see the problem too. 
But also we have a\n> problem that I described above: optimizer don't choose a path with minimal\n> selectivity from a set selectivities which shows cardinality less than 1\n> (see badestimate2.sql).\n> New patch (see in attachment), fixes this problem.\n\nThis causes the mains regression tests to fail due to a planner change:\n\nhttps://cirrus-ci.com/build/6680222884429824\n\ndiff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/join.out /tmp/cirrus-ci-build/build/testrun/regress/regress/results/join.out\n--- /tmp/cirrus-ci-build/src/test/regress/expected/join.out\t2022-11-22 12:27:18.852087140 +0000\n+++ /tmp/cirrus-ci-build/build/testrun/regress/regress/results/join.out\t2022-11-22 12:28:47.934938882 +0000\n@@ -6671,10 +6671,9 @@\n Merge Cond: (j1.id1 = j2.id1)\n Join Filter: (j2.id2 = j1.id2)\n -> Index Scan using j1_id1_idx on j1\n- -> Index Only Scan using j2_pkey on j2\n+ -> Index Scan using j2_id1_idx on j2\n Index Cond: (id1 >= ANY ('{1,5}'::integer[]))\n- Filter: ((id1 % 1000) = 1)\n-(7 rows)\n+(6 rows)\n \n select * from j1\n inner join j2 on j1.id1 = j2.id1 and j1.id2 = j2.id2\n \nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Tue, 22 Nov 2022 09:14:50 -0800", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "On 12/8/2021 06:26, Tomas Vondra wrote:\n> On 8/11/21 2:48 AM, Peter Geoghegan wrote:\n>> On Wed, Jun 23, 2021 at 7:19 AM Andrey V. Lepikhov\n>> <a.lepikhov@postgrespro.ru> wrote:\n>>> Ivan Frolkov reported a problem with choosing a non-optimal index during\n>>> a query optimization. 
This problem appeared after building of an\n>>> extended statistics.\n>>\n>> Any thoughts on this, Tomas?\n>>\n> \n> Thanks for reminding me, I missed / forgot about this thread.\n> \n> I agree the current behavior is unfortunate, but I'm not convinced the \n> proposed patch is fixing the right place - doesn't this mean the index \n> costing won't match the row estimates displayed by EXPLAIN?\n> \n> I wonder if we should teach clauselist_selectivity about UNIQUE indexes, \n> and improve the cardinality estimates directly, not just costing for \n> index scans.\n> \n> Also, is it correct that the patch calculates num_sa_scans only when \n> (numIndexTuples >= 0.0)?\nI can't stop thinking about this issue. It is bizarre when Postgres \nchooses a non-unique index if a unique index gives us proof of minimum scan.\nI don't see a reason to teach the clauselist_selectivity() routine to \nestimate UNIQUE indexes. We add some cycles, but it will work with btree \nindexes only.\nMaybe to change compare_path_costs_fuzzily() and add some heuristic, for \nexample:\n\"If selectivity of both paths gives us no more than 1 row, prefer to use \na unique index or an index with least selectivity.\"\n\n-- \nregards,\nAndrey Lepikhov\nPostgres Professional\n\n\n\n", "msg_date": "Mon, 25 Sep 2023 11:30:10 +0700", "msg_from": "Andrey Lepikhov <a.lepikhov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "On 9/25/23 06:30, Andrey Lepikhov wrote:\n> ...\n> I can't stop thinking about this issue. It is bizarre when Postgres\n> chooses a non-unique index if a unique index gives us proof of minimum\n> scan.\n\nThat's true, but no one implemented this heuristics. So the \"proof of\nminimum scan\" is merely hypothetical - the optimizer is unaware of it.\n\n> I don't see a reason to teach the clauselist_selectivity() routine to\n> estimate UNIQUE indexes. 
We add some cycles, but it will work with btree\n> indexes only.\n\nI'm not sure I understand what this is meant to say. Can you elaborate?\nWe only allow UNIQUE for btree indexes anyway, so what exactly is the\nproblem here?\n\n> Maybe to change compare_path_costs_fuzzily() and add some heuristic, for\n> example:\n> \"If selectivity of both paths gives us no more than 1 row, prefer to use\n> a unique index or an index with least selectivity.\"\n> \n\nI don't understand how this would work. What do yo mean by \"selectivity\nof a path\"? AFAICS the problem here is that we estimate a path to return\nmore rows (while we know there can only be 1, thanks to UNIQUE index).\n\nSo how would you know the path does not give us more than 1 row? Surely\nyou're not proposing compare_path_costs_fuzzily() to do something\nexpensive like analyzing the paths, or something.\n\nAlso, how would it work in upper levels? If you just change which path\nwe keep, but leave the inaccurate row estimate in place, that may fix\nthat level, but it's certainly going to confuse later planning steps.\n\nIMHO the problem here is that we produce wrong estimate, so we better\nfix that, instead of adding band-aids to other places.\n\nThis happens because functional dependencies are very simple type of\nstatistics - it has some expectations about the input data and also the\nqueries executed on it. For example it assumes the data is reasonably\nhomogeneous, so that we can calculate a global \"degree\".\n\nBut the data in the example directly violates that - it has 1000 rows\nthat are very random (so we'd find no dependencies), and 1000 rows with\nperfect dependencies. Hence we end with degree=0.5, which approximates\nthe dependencies to all data. 
Not great, true, but that's the price for\nsimplicity of this statistics kind.\n\nSo the simplest solution is to disable dependencies on such data sets.\nIt's a bit inconvenient/unfortunate we build dependencies by default,\nand people may not be aware of there assumptions.\n\nPerhaps we could/should make dependency_degree() more pessimistic when\nwe find some \"violations\" of the rule (we intentionally are not strict\nabout it, because data are imperfect). I don't have a good idea how to\nchange the formulas, but I'm open to the idea in principle.\n\nThe other thing we could do is checking for unique indexes in\nclauselist_selectivity, and if we find a match we can just skip the\nextended statistics altogether. Not sure how expensive this would be,\nbut for typical cases (with modest number of indexes) perhaps OK. It\nwouldn't work without a unique index, but I don't have a better idea.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Thu, 2 Nov 2023 18:37:32 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "Thanks for detaied answer,\n\nOn 3/11/2023 00:37, Tomas Vondra wrote:\n> On 9/25/23 06:30, Andrey Lepikhov wrote:\n>> ...\n>> I can't stop thinking about this issue. It is bizarre when Postgres\n>> chooses a non-unique index if a unique index gives us proof of minimum\n>> scan.\n> That's true, but no one implemented this heuristics. So the \"proof of\n> minimum scan\" is merely hypothetical - the optimizer is unaware of it.\n\nSee the simple patch in the attachment. There, I have attempted to \nresolve situations of uncertainty to avoid making decisions based solely \non the order of indexes in the list.\n\n>> I don't see a reason to teach the clauselist_selectivity() routine to\n>> estimate UNIQUE indexes. 
We add some cycles, but it will work with btree\n>> indexes only.\n> I'm not sure I understand what this is meant to say. Can you elaborate?\n> We only allow UNIQUE for btree indexes anyway, so what exactly is the\n> problem here?\n\nPartly, you already answered yourself below: we have unique index \nestimation in a few estimation calls, but go through the list of indexes \neach time.\nAlso, for this sake, we would add some input parameters, usually NULL, \nbecause many estimations don't involve indexes at all.\n\n>> Maybe to change compare_path_costs_fuzzily() and add some heuristic, for\n>> example:\n>> \"If selectivity of both paths gives us no more than 1 row, prefer to use\n>> a unique index or an index with least selectivity.\"\n> I don't understand how this would work. What do yo mean by \"selectivity\n> of a path\"? AFAICS the problem here is that we estimate a path to return\n> more rows (while we know there can only be 1, thanks to UNIQUE index).\n\nOops, I meant cardinality. See the patch in the attachment.\n\n> So how would you know the path does not give us more than 1 row? Surely\n> you're not proposing compare_path_costs_fuzzily() to do something\n> expensive like analyzing the paths, or something.\n\nI solely propose to make optimizer more consecutive in its decisions: if \nwe have one row for both path candidates, use uniqueness of the index or \nvalue of selectivity as one more parameter.\n\n> Also, how would it work in upper levels? If you just change which path\n> we keep, but leave the inaccurate row estimate in place, that may fix\n> that level, but it's certainly going to confuse later planning steps.\nIt is designed for the only scan level.\n> IMHO the problem here is that we produce wrong estimate, so we better\n> fix that, instead of adding band-aids to other places.\n\nAgree. I am looking for a solution to help users somehow resolve such \nproblems. 
As an alternative solution, I can propose a selectivity hook \nor (maybe even better) - use the pathlist approach and add indexes into \nthe index list with some predefined order - at first positions, place \nunique indexes with more columns, etc.\n\n> This happens because functional dependencies are very simple type of\n> statistics - it has some expectations about the input data and also the\n> queries executed on it. For example it assumes the data is reasonably\n> homogeneous, so that we can calculate a global \"degree\".\n> \n> But the data in the example directly violates that - it has 1000 rows\n> that are very random (so we'd find no dependencies), and 1000 rows with\n> perfect dependencies. Hence we end with degree=0.5, which approximates\n> the dependencies to all data. Not great, true, but that's the price for\n> simplicity of this statistics kind.\n> \n> So the simplest solution is to disable dependencies on such data sets.\n> It's a bit inconvenient/unfortunate we build dependencies by default,\n> and people may not be aware of there assumptions.\n> \n> Perhaps we could/should make dependency_degree() more pessimistic when\n> we find some \"violations\" of the rule (we intentionally are not strict\n> about it, because data are imperfect). I don't have a good idea how to\n> change the formulas, but I'm open to the idea in principle.\n\nThanks for the explanation!\n\n> The other thing we could do is checking for unique indexes in\n> clauselist_selectivity, and if we find a match we can just skip the\n> extended statistics altogether. Not sure how expensive this would be,\n> but for typical cases (with modest number of indexes) perhaps OK. It\n> wouldn't work without a unique index, but I don't have a better idea.\nIt looks a bit expensive for me. 
But I am ready to try, if current \nsolution doesn't look applicable.\n\n-- \nregards,\nAndrei Lepikhov\nPostgres Professional", "msg_date": "Wed, 22 Nov 2023 13:31:44 +0700", "msg_from": "Andrei Lepikhov <a.lepikhov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "Second version of the patch - resolve non-symmetrical decision, thanks \nto Teodor Sigaev's review.\n\n\n-- \nregards,\nAndrei Lepikhov\nPostgres Professional", "msg_date": "Mon, 27 Nov 2023 11:44:13 +0700", "msg_from": "Andrei Lepikhov <a.lepikhov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "Hi!\n\nI'd like to get this subject off the ground. The problem originally\ndescribed in [1] obviously comes from wrong selectivity estimation.\n \"Dependencies\" extended statistics lead to significant selectivity miss\n24/1000 instead of 1/1000. When the estimation is correct, the PostgreSQL\noptimizer is capable of choosing the appropriate unique index for the query.\n\nTom pointed out in [2] that this might be a problem of \"Dependencies\"\nextended statistics. I've rechecked this. The dataset consists of two\nparts. The first part defined in the CREATE TABLE statement contains\nindependent values. The second part defined in the INSERT statement\ncontains dependent values. \"Dependencies\" extended statistics estimate\ndependency degree as a fraction of rows containing dependent values.\nAccording to this definition, it correctly estimates the dependency degree\nas about 0.5 for all the combinations. So, the error in the estimate comes\nfrom the synergy of two factors: MCV estimation of the frequency of\nindividual values, and spreading of average dependency degree for those\nvalues, which is not relevant for them. 
I don't think there is a way to\nfix \"dependencies\" extended statistics because it works exactly as\ndesigned. I have to note that instead of fixing \"dependencies\" extended\nstatistics you can just add multi-column MCV statistics, which would fix\nthe problem.\n\nCREATE STATISTICS aestat(dependencies,ndistinct,mcv) ON x,y,z FROM a;\n\nIndependently on how well our statistics work, it looks pitiful that we\ncouldn't fix that using the knowledge of unique constraints on the table.\nDespite statistics, which give us just more or less accurate estimates, the\nconstraint is something we really enforce and thus can rely on. The\npatches provided by Andrei in [1], [3], and [4] fix this issue at the index\nscan path level. As Thomas pointed out in [5], that could lead to\ninconsistency between the number of rows used for unique index scan\nestimation and the value displayed in EXPLAIN (and used for other paths).\nEven though this was debated in [6], I can confirm this inconsistency.\nThus, with the patch published in [4], I can see the 28-row estimation with\na unique index scan.\n\n` QUERY PLAN\n-----------------------------------------------------------------------\n Index Only Scan using a_pkey on a (cost=0.28..8.30 rows=28 width=12)\n Index Cond: ((x = 1) AND (y = 1) AND (z = 1))\n(2 rows)\n\nAlso, there is a set of patches [7], [8], and [9], which makes the\noptimizer consider path selectivity as long as path costs during the path\nselection. I've rechecked that none of these patches could resolve the\noriginal problem described in [1]. Also, I think they are quite tricky.\nThe model of our optimizer assumes that paths in the list should be the\ndifferent ways of getting the same result. If we choose the paths by their\nselectivity, that breaks this model. I don't say there is no way for\nthis. But if we do this, that would require significant rethinking of our\noptimizer model and possible revision of a significant part of it. 
Anyway,\nI think if there is still interest in this, that should be moved into a\nseparate thread to keep this thread focused on the problem described in [1].\n\nFinally, I'd like to note that the issue described in [1] is mostly the\nselectivity estimation problem. It could be solved by adding the\nmulti-column MCV statistics. The patches published so far look more like\nhacks for particular use cases rather than appropriate solutions. It still\nlooks promising to me to use the knowledge of unique constraints during\nselectivity estimation [10]. Even though it's hard to implement and\npossibly implies some overhead, it fits the current model. I also think\nunique contracts could probably be used in some way to improve estimates\neven when there is no full match.\n\nLinks.\n1.\nhttps://www.postgresql.org/message-id/0ca4553c-1f34-12ba-9122-44199d1ced41%40postgrespro.ru\n2. https://www.postgresql.org/message-id/3119052.1657231656%40sss.pgh.pa.us\n3.\nhttps://www.postgresql.org/message-id/90a1d6ef-c777-b95d-9f77-0065ad4522df%40postgrespro.ru\n4.\nhttps://www.postgresql.org/message-id/a5a18d86-c0e5-0ceb-9a18-be1beb2d2944%40postgrespro.ru\n5.\nhttps://www.postgresql.org/message-id/f8044836-5d61-a4e0-af82-5821a2a1f0a7%40enterprisedb.com\n6.\nhttps://www.postgresql.org/message-id/90a1d6ef-c777-b95d-9f77-0065ad4522df%40postgrespro.ru\n7.\nhttps://www.postgresql.org/message-id/2df148b5-0bb8-f80b-ac03-251682fab585%40postgrespro.ru\n8.\nhttps://www.postgresql.org/message-id/6fb43191-2df3-4791-b307-be754e648276%40postgrespro.ru\n9.\nhttps://www.postgresql.org/message-id/154f786a-06a0-4fb1-b8a4-16c66149731b%40postgrespro.ru\n10.\nhttps://www.postgresql.org/message-id/f8044836-5d61-a4e0-af82-5821a2a1f0a7%40enterprisedb.com\n\n------\nRegards,\nAlexander Korotkov\n\nHi!I'd like to get this subject off the ground.  The problem originally described in [1] obviously comes from wrong selectivity estimation.  
\"Dependencies\" extended statistics lead to significant selectivity miss 24/1000 instead of 1/1000.  When the estimation is correct, the PostgreSQL optimizer is capable of choosing the appropriate unique index for the query.Tom pointed out in [2] that this might be a problem of \"Dependencies\" extended statistics.  I've rechecked this.  The dataset consists of two parts.  The first part defined in the CREATE TABLE statement contains independent values.  The second part defined in the INSERT statement contains dependent values.  \"Dependencies\" extended statistics estimate dependency degree as a fraction of rows containing dependent values.  According to this definition, it correctly estimates the dependency degree as about 0.5 for all the combinations.  So, the error in the estimate comes from the synergy of two factors: MCV estimation of the frequency of individual values, and spreading of average dependency degree for those values, which is not relevant for them.  I don't think there is a way to fix \"dependencies\" extended statistics because it works exactly as designed.  I have to note that instead of fixing \"dependencies\" extended statistics you can just add multi-column MCV statistics, which would fix the problem.CREATE STATISTICS aestat(dependencies,ndistinct,mcv) ON x,y,z FROM a;Independently on how well our statistics work, it looks pitiful that we couldn't fix that using the knowledge of unique constraints on the table.  Despite statistics, which give us just more or less accurate estimates, the constraint is something we really enforce and thus can rely on.  The patches provided by Andrei in [1], [3], and [4] fix this issue at the index scan path level.  As Thomas pointed out in [5], that could lead to inconsistency between the number of rows used for unique index scan estimation and the value displayed in EXPLAIN (and used for other paths).  Even though this was debated in [6], I can confirm this inconsistency.  
Thus, with the patch published in [4], I can see the 28-row estimation with a unique index scan.`                              QUERY PLAN----------------------------------------------------------------------- Index Only Scan using a_pkey on a  (cost=0.28..8.30 rows=28 width=12)   Index Cond: ((x = 1) AND (y = 1) AND (z = 1))(2 rows)Also, there is a set of patches [7], [8], and [9], which makes the optimizer consider path selectivity as long as path costs during the path selection.  I've rechecked that none of these patches could resolve the original problem described in [1].  Also, I think they are quite tricky.  The model of our optimizer assumes that paths in the list should be the different ways of getting the same result.  If we choose the paths by their selectivity, that breaks this model.  I don't say there is no way for this.  But if we do this, that would require significant rethinking of our optimizer model and possible revision of a significant part of it.  Anyway, I think if there is still interest in this, that should be moved into a separate thread to keep this thread focused on the problem described in [1].Finally, I'd like to note that the issue described in [1] is mostly the selectivity estimation problem.  It could be solved by adding the multi-column MCV statistics.  The patches published so far look more like hacks for particular use cases rather than appropriate solutions.  It still looks promising to me to use the knowledge of unique constraints during selectivity estimation [10].  Even though it's hard to implement and possibly implies some overhead, it fits the current model.  I also think unique contracts could probably be used in some way to improve estimates even when there is no full match.Links.1. https://www.postgresql.org/message-id/0ca4553c-1f34-12ba-9122-44199d1ced41%40postgrespro.ru2. https://www.postgresql.org/message-id/3119052.1657231656%40sss.pgh.pa.us3. 
https://www.postgresql.org/message-id/90a1d6ef-c777-b95d-9f77-0065ad4522df%40postgrespro.ru4. https://www.postgresql.org/message-id/a5a18d86-c0e5-0ceb-9a18-be1beb2d2944%40postgrespro.ru5. https://www.postgresql.org/message-id/f8044836-5d61-a4e0-af82-5821a2a1f0a7%40enterprisedb.com6. https://www.postgresql.org/message-id/90a1d6ef-c777-b95d-9f77-0065ad4522df%40postgrespro.ru7. https://www.postgresql.org/message-id/2df148b5-0bb8-f80b-ac03-251682fab585%40postgrespro.ru8. https://www.postgresql.org/message-id/6fb43191-2df3-4791-b307-be754e648276%40postgrespro.ru9. https://www.postgresql.org/message-id/154f786a-06a0-4fb1-b8a4-16c66149731b%40postgrespro.ru10. https://www.postgresql.org/message-id/f8044836-5d61-a4e0-af82-5821a2a1f0a7%40enterprisedb.com------Regards,Alexander Korotkov", "msg_date": "Mon, 18 Dec 2023 15:29:35 +0200", "msg_from": "Alexander Korotkov <aekorotkov@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "On 18/12/2023 15:29, Alexander Korotkov wrote:\n> Also, there is a set of patches [7], [8], and [9], which makes the \n> optimizer consider path selectivity as long as path costs during the \n> path selection.  I've rechecked that none of these patches could resolve \n> the original problem described in [1].\nIt is true. We accidentally mixed two different problems in one thread.\n>  Also, I think they are quite \n> tricky.  The model of our optimizer assumes that paths in the list \n> should be the different ways of getting the same result.  If we choose \n> the paths by their selectivity, that breaks this model.  I don't say \n> there is no way for this.  But if we do this, that would require \n> significant rethinking of our optimizer model and possible revision of a \n> significant part of it.\nI can't understand that. 
In [9] we just elaborate the COSTS_EQUAL case \nand establish final decision on more stable basis than a casual order of \nindexes in the list.\n>  Anyway, I think if there is still interest in \n> this, that should be moved into a separate thread to keep this thread \n> focused on the problem described in [1].\nAgree. IMO, the problem of optimizer dependency on an order of indexes \nin the relation index list is more urgent for now.\n> \n> Finally, I'd like to note that the issue described in [1] is mostly the \n> selectivity estimation problem.  It could be solved by adding the \n> multi-column MCV statistics.  The patches published so far look more \n> like hacks for particular use cases rather than appropriate solutions. \n> It still looks promising to me to use the knowledge of unique \n> constraints during selectivity estimation [10].  Even though it's hard \n> to implement and possibly implies some overhead, it fits the current \n> model.  I also think unique contracts could probably be used in some way \n> to improve estimates even when there is no full match.\nI have tried to use the knowledge about unique indexes in the \nselectivity estimation routine. But it looks invasive and adds a lot of \noverhead.\n\n-- \nregards,\nAndrei Lepikhov\nPostgres Professional\n\n\n\n", "msg_date": "Thu, 21 Dec 2023 10:41:35 +0200", "msg_from": "Andrei Lepikhov <a.lepikhov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" }, { "msg_contents": "On Thu, Dec 21, 2023 at 10:41 AM Andrei Lepikhov\n<a.lepikhov@postgrespro.ru> wrote:\n>\n> On 18/12/2023 15:29, Alexander Korotkov wrote:\n> > Also, there is a set of patches [7], [8], and [9], which makes the\n> > optimizer consider path selectivity as long as path costs during the\n> > path selection. I've rechecked that none of these patches could resolve\n> > the original problem described in [1].\n> It is true. 
We accidentally mixed two different problems in one thread.\n> > Also, I think they are quite\n> > tricky. The model of our optimizer assumes that paths in the list\n> > should be the different ways of getting the same result. If we choose\n> > the paths by their selectivity, that breaks this model. I don't say\n> > there is no way for this. But if we do this, that would require\n> > significant rethinking of our optimizer model and possible revision of a\n> > significant part of it.\n> I can't understand that. In [9] we just elaborate the COSTS_EQUAL case\n> and establish final decision on more stable basis than a casual order of\n> indexes in the list.\n\nI took a closer look at the patch in [9]. I should drop my argument\nabout breaking the model, because add_path() already considers other\naspects than just costs. But I have two more note about that patch:\n\n1) It seems that you're determining the fact that the index path\nshould return strictly one row by checking path->rows <= 1.0 and\nindexinfo->unique. Is it really guaranteed that in this case quals\nare matching unique constraint? path->rows <= 1.0 could be just an\nestimation error. Or one row could be correctly estimated, but it's\ngoing to be selected by some quals matching unique constraint and\nother quals in recheck. So, it seems there is a risk to select\nsuboptimal index due to this condition.\n\n2) Even for non-unique indexes this patch is putting new logic on top\nof the subsequent code. How we can prove it's going to be a win?\nThat could lead, for instance, to dropping parallel-safe paths in\ncases we didn't do so before.\n\nAnyway, please start a separate thread if you're willing to put more\nwork into this.\n\n> > Anyway, I think if there is still interest in\n> > this, that should be moved into a separate thread to keep this thread\n> > focused on the problem described in [1].\n> Agree. 
IMO, the problem of optimizer dependency on an order of indexes\n> in the relation index list is more urgent for now.\n> >\n> > Finally, I'd like to note that the issue described in [1] is mostly the\n> > selectivity estimation problem. It could be solved by adding the\n> > multi-column MCV statistics. The patches published so far look more\n> > like hacks for particular use cases rather than appropriate solutions.\n> > It still looks promising to me to use the knowledge of unique\n> > constraints during selectivity estimation [10]. Even though it's hard\n> > to implement and possibly implies some overhead, it fits the current\n> > model. I also think unique contracts could probably be used in some way\n> > to improve estimates even when there is no full match.\n> I have tried to use the knowledge about unique indexes in the\n> selectivity estimation routine. But it looks invasive and adds a lot of\n> overhead.\n\nI got it. But it doesn't look enough to decide this is no way. Could\nyou, please, share some of your results? It might happen that we just\nneed to rework some of data structures to make this information more\neasily accessible at selectivity estimation stage.\n\n------\nRegards,\nAlexander Korotkov\n\n\n", "msg_date": "Thu, 21 Dec 2023 12:10:46 +0200", "msg_from": "Alexander Korotkov <aekorotkov@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Postgres picks suboptimal index after building of an extended\n statistics" } ]
[ { "msg_contents": "Hi,\n\nNot per Coverity!\n\nAbout comments:\n1. For drop, no \"copy data\"\n2. Only refresh the added/*dropped* list of publications. (my emphasis)\n\nThe documentation says:\nhttps://www.postgresql.org/docs/14/sql-altersubscription.html\n\n\"DROP PUBLICATION *publication_name*\n\nChanges the list of subscribed publications. SET replaces the entire list\nof publications with a new list, ADD adds additional publications, DROP\nremoves publications from the list of publications. See CREATE SUBSCRIPTION\n<https://www.postgresql.org/docs/14/sql-createsubscription.html> for more\ninformation. By default, this command will also act like REFRESH PUBLICATION,\nexcept that in case of ADD or DROP, only the added or dropped publications\nare refreshed.\n\n*set_publication_option* specifies additional options for this operation.\nThe supported options are:\nrefresh (boolean)\n\nWhen false, the command will not try to refresh table information. REFRESH\nPUBLICATION should then be executed separately. The default is true.\n\nAdditionally, refresh options as described under REFRESH PUBLICATION may be\nspecified.\"\nSo, is allowed DROP PUBLICATION with (refresh = true)\n\nI try some tests with subscription.sql:\nCREATE SUBSCRIPTION regress_testsub3 CONNECTION\n'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false,\nstreaming = true);\n+CREATE SUBSCRIPTION regress_testsub3 CONNECTION\n'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false,\nstreaming = true);\n+WARNING: tables were not subscribed, you will have to run ALTER\nSUBSCRIPTION ... 
REFRESH PUBLICATION to subscribe the tables\n\nALTER SUBSCRIPTION regress_testsub3 ENABLE;\nALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;\n+ALTER SUBSCRIPTION regress_testsub3 ENABLE;\n+ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;\n+ERROR: could not connect to the publisher: connection to server at\n\"localhost\" (::1), port 58080 failed: FATAL: database\n\"regress_doesnotexist\" does not exist\n\n-- ok - delete active publication with refresh = true\nALTER SUBSCRIPTION regress_testsub3 DROP PUBLICATION testpub WITH (refresh\n= true);\n+-- ok - delete active publication with refresh = true\n+ALTER SUBSCRIPTION regress_testsub3 DROP PUBLICATION testpub WITH (refresh\n= true);\n+ERROR: subscription must contain at least one publication\n\nI think this bug is live, for lack of tests with DROP PUBLICATION WITH\n(refresh = true).\n\nregards,\nRanier Vilela", "msg_date": "Wed, 23 Jun 2021 14:38:26 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Fix uninitialized copy_data var\n (src/backend/commands/subscriptioncmds.c)" }, { "msg_contents": "Em qua., 23 de jun. de 2021 às 14:38, Ranier Vilela <ranier.vf@gmail.com>\nescreveu:\n\n> Hi,\n>\n> Not per Coverity!\n>\n> About comments:\n> 1. For drop, no \"copy data\"\n> 2. Only refresh the added/*dropped* list of publications. (my emphasis)\n>\n> The documentation says:\n> https://www.postgresql.org/docs/14/sql-altersubscription.html\n>\n> \"DROP PUBLICATION *publication_name*\n>\n> Changes the list of subscribed publications. SET replaces the entire list\n> of publications with a new list, ADD adds additional publications, DROP\n> removes publications from the list of publications. See CREATE\n> SUBSCRIPTION\n> <https://www.postgresql.org/docs/14/sql-createsubscription.html> for more\n> information. 
By default, this command will also act like REFRESH\n> PUBLICATION, except that in case of ADD or DROP, only the added or\n> dropped publications are refreshed.\n>\n> *set_publication_option* specifies additional options for this operation.\n> The supported options are:\n> refresh (boolean)\n>\n> When false, the command will not try to refresh table information. REFRESH\n> PUBLICATION should then be executed separately. The default is true.\n>\n> Additionally, refresh options as described under REFRESH PUBLICATION may\n> be specified.\"\n> So, is allowed DROP PUBLICATION with (refresh = true)\n>\n> I try some tests with subscription.sql:\n> CREATE SUBSCRIPTION regress_testsub3 CONNECTION\n> 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false,\n> streaming = true);\n> +CREATE SUBSCRIPTION regress_testsub3 CONNECTION\n> 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false,\n> streaming = true);\n> +WARNING: tables were not subscribed, you will have to run ALTER\n> SUBSCRIPTION ... 
REFRESH PUBLICATION to subscribe the tables\n>\n> ALTER SUBSCRIPTION regress_testsub3 ENABLE;\n> ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;\n> +ALTER SUBSCRIPTION regress_testsub3 ENABLE;\n> +ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;\n> +ERROR: could not connect to the publisher: connection to server at\n> \"localhost\" (::1), port 58080 failed: FATAL: database\n> \"regress_doesnotexist\" does not exist\n>\n> -- ok - delete active publication with refresh = true\n> ALTER SUBSCRIPTION regress_testsub3 DROP PUBLICATION testpub WITH (refresh\n> = true);\n> +-- ok - delete active publication with refresh = true\n> +ALTER SUBSCRIPTION regress_testsub3 DROP PUBLICATION testpub WITH\n> (refresh = true);\n> +ERROR: subscription must contain at least one publication\n>\n> I think this bug is live, for lack of tests with DROP PUBLICATION WITH\n> (refresh = true).\n>\nhttps://github.com/postgres/postgres/commit/3af10943ce21450e299b3915b9cad47cd90369e9\nfixes some issues with subscriptioncmds.c, but IMHO still lack this issue.\n\nregards,\nRanier Vilela\n\nEm qua., 23 de jun. de 2021 às 14:38, Ranier Vilela <ranier.vf@gmail.com> escreveu:Hi,Not per Coverity!About comments:1. For drop, no \"copy data\"2. Only refresh the added/*dropped* list of publications. (my emphasis)The documentation says:https://www.postgresql.org/docs/14/sql-altersubscription.html\"DROP PUBLICATION publication_name\nChanges the list of subscribed publications. SET replaces the entire list of publications with a new list, ADD adds additional publications, DROP removes publications from the list of publications. See CREATE SUBSCRIPTION for more information. By default, this command will also act like REFRESH PUBLICATION, except that in case of ADD or DROP, only the added or dropped publications are refreshed.\nset_publication_option specifies additional options for this operation. 
The supported options are:\n\nrefresh (boolean)\nWhen false, the command will not try to refresh table information. REFRESH PUBLICATION should then be executed separately. The default is true.\n\n\nAdditionally, refresh options as described under REFRESH PUBLICATION may be specified.\"So, is allowed DROP PUBLICATION with (refresh = true)I try some tests with subscription.sql:CREATE SUBSCRIPTION regress_testsub3 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true);+CREATE SUBSCRIPTION regress_testsub3 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true);+WARNING:  tables were not subscribed, you will have to run ALTER SUBSCRIPTION ... REFRESH PUBLICATION to subscribe the tablesALTER SUBSCRIPTION regress_testsub3 ENABLE;ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;+ALTER SUBSCRIPTION regress_testsub3 ENABLE;+ALTER SUBSCRIPTION regress_testsub3 REFRESH PUBLICATION;+ERROR:  could not connect to the publisher: connection to server at \"localhost\" (::1), port 58080 failed: FATAL:  database \"regress_doesnotexist\" does not exist-- ok - delete active publication with refresh = trueALTER SUBSCRIPTION regress_testsub3 DROP PUBLICATION testpub WITH (refresh = true);+-- ok - delete active publication with refresh = true+ALTER SUBSCRIPTION regress_testsub3 DROP PUBLICATION testpub WITH (refresh = true);+ERROR:  subscription must contain at least one publicationI think this bug is live, for lack of tests with DROP PUBLICATION WITH (refresh = true).https://github.com/postgres/postgres/commit/3af10943ce21450e299b3915b9cad47cd90369e9fixes some issues with subscriptioncmds.c, but IMHO still lack this issue.regards,Ranier Vilela", "msg_date": "Fri, 25 Jun 2021 10:55:21 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Fix uninitialized copy_data var\n (src/backend/commands/subscriptioncmds.c)" }, { "msg_contents": "On Fri, 
Jun 25, 2021 at 11:55 PM Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n>\n> https://github.com/postgres/postgres/commit/3af10943ce21450e299b3915b9cad47cd90369e9\n> fixes some issues with subscriptioncmds.c, but IMHO still lack this issue.\n>\n\nI have not tested this, and gcc gave no warnings about it, but just by\nvisual code inspection I do agree with you that this looks like a\nproblem, even in the latest code.\n\nIIUC for the case ALTER_SUBSCRIPTION_DROP_PUBLICATION it looks like\nthe uninitialized copy_data local stack var would remain uninitialized\n(undefined) still at the time it is passed at\nAlterSubscription_refresh(sub, copy_data);\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia.\n\n\n", "msg_date": "Mon, 28 Jun 2021 10:17:55 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized copy_data var\n (src/backend/commands/subscriptioncmds.c)" }, { "msg_contents": "On Mon, Jun 28, 2021 at 10:17:55AM +1000, Peter Smith wrote:\n> IIUC for the case ALTER_SUBSCRIPTION_DROP_PUBLICATION it looks like\n> the uninitialized copy_data local stack var would remain uninitialized\n> (undefined) still at the time it is passed at\n> AlterSubscription_refresh(sub, copy_data);\n\nYes, that's wrong. AlterSubscription_refresh() would happily look at\nthis uninitialized value when performing a refresh with this command.\nThat's the only code path using parse_subscription_options() with this\npattern. Applied on HEAD.\n--\nMichael", "msg_date": "Mon, 28 Jun 2021 12:29:06 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized copy_data var\n (src/backend/commands/subscriptioncmds.c)" }, { "msg_contents": "Em dom., 27 de jun. 
de 2021 às 21:18, Peter Smith <smithpb2250@gmail.com>\nescreveu:\n\n> On Fri, Jun 25, 2021 at 11:55 PM Ranier Vilela <ranier.vf@gmail.com>\n> wrote:\n> >\n> >\n> >\n> https://github.com/postgres/postgres/commit/3af10943ce21450e299b3915b9cad47cd90369e9\n> > fixes some issues with subscriptioncmds.c, but IMHO still lack this\n> issue.\n> >\n>\n> I have not tested this, and gcc gave no warnings about it, but just by\n> visual code inspection I do agree with you that this looks like a\n> problem, even in the latest code.\n>\n> IIUC for the case ALTER_SUBSCRIPTION_DROP_PUBLICATION it looks like\n> the uninitialized copy_data local stack var would remain uninitialized\n> (undefined) still at the time it is passed at\n> AlterSubscription_refresh(sub, copy_data);\n>\nThanks Peter, for the review.\n\nregards,\nRanier Vilela\n\nEm dom., 27 de jun. de 2021 às 21:18, Peter Smith <smithpb2250@gmail.com> escreveu:On Fri, Jun 25, 2021 at 11:55 PM Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n>\n> https://github.com/postgres/postgres/commit/3af10943ce21450e299b3915b9cad47cd90369e9\n> fixes some issues with subscriptioncmds.c, but IMHO still lack this issue.\n>\n\nI have not tested this, and gcc gave no warnings about it, but just by\nvisual code inspection I do agree with you that this looks like a\nproblem, even in the latest code.\n\nIIUC for the case ALTER_SUBSCRIPTION_DROP_PUBLICATION it looks like\nthe uninitialized copy_data local stack var would remain uninitialized\n(undefined) still at the time it is passed at\nAlterSubscription_refresh(sub, copy_data);Thanks Peter, for the review.regards,Ranier Vilela", "msg_date": "Mon, 28 Jun 2021 08:05:37 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Fix uninitialized copy_data var\n (src/backend/commands/subscriptioncmds.c)" }, { "msg_contents": "Em seg., 28 de jun. 
de 2021 às 00:29, Michael Paquier <michael@paquier.xyz>\nescreveu:\n\n> On Mon, Jun 28, 2021 at 10:17:55AM +1000, Peter Smith wrote:\n> > IIUC for the case ALTER_SUBSCRIPTION_DROP_PUBLICATION it looks like\n> > the uninitialized copy_data local stack var would remain uninitialized\n> > (undefined) still at the time it is passed at\n> > AlterSubscription_refresh(sub, copy_data);\n>\n> Yes, that's wrong. AlterSubscription_refresh() would happily look at\n> this uninitialized value when performing a refresh with this command.\n> That's the only code path using parse_subscription_options() with this\n> pattern. Applied on HEAD.\n>\nHi Michael,\nThank you for this comitt.\n\nregards,\nRanier Vilela\n\nEm seg., 28 de jun. de 2021 às 00:29, Michael Paquier <michael@paquier.xyz> escreveu:On Mon, Jun 28, 2021 at 10:17:55AM +1000, Peter Smith wrote:\n> IIUC for the case ALTER_SUBSCRIPTION_DROP_PUBLICATION it looks like\n> the uninitialized copy_data local stack var would remain uninitialized\n> (undefined) still at the time it is passed at\n> AlterSubscription_refresh(sub, copy_data);\n\nYes, that's wrong.  AlterSubscription_refresh() would happily look at\nthis uninitialized value when performing a refresh with this command.\nThat's the only code path using parse_subscription_options() with this\npattern.  Applied on HEAD.Hi Michael,Thank you for this comitt.regards,Ranier Vilela", "msg_date": "Mon, 28 Jun 2021 08:08:54 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Fix uninitialized copy_data var\n (src/backend/commands/subscriptioncmds.c)" } ]
[ { "msg_contents": "Hi all\r\n\r\nIn PostgreSQL 14, The default value of shared_buffers is 128MB, but in postgresql.conf.sample, the default value of shared_buffers is 32MB.\r\n\r\nI think the following changes should be made.\r\n\r\nFile: postgresql\\src\\backend\\utils\\misc\\ postgresql.conf.sample\r\n#shared_buffers = 32MB => #shared_buffers = 128MB\r\n\r\n[PostgreSQL 14]\r\nshared_buffers (integer)\r\nSets the amount of memory the database server uses for shared memory buffers. The default is typically 128 megabytes (128MB)\r\nhttps://www.postgresql.org/docs/14/runtime-config-resource.html\r\n\r\n--------------------------------------------------------------------\r\nIn PostgreSQL 9.2, The default value of shared_buffers is 32MB.\r\n\r\n[PostgreSQL 9.2]\r\nshared_buffers (integer)\r\nSets the amount of memory the database server uses for shared memory buffers. The default is typically 32 megabytes (32MB)\r\nhttps://www.postgresql.org/docs/9.2/runtime-config-resource.html\r\n\r\nHere is a patch.\r\n\r\nBest Regards!", "msg_date": "Thu, 24 Jun 2021 06:37:48 +0000", "msg_from": "\"zhangjie2@fujitsu.com\" <zhangjie2@fujitsu.com>", "msg_from_op": true, "msg_subject": "[Patch] change the default value of shared_buffers in\n postgresql.conf.sample" }, { "msg_contents": "\"zhangjie2@fujitsu.com\" <zhangjie2@fujitsu.com> writes:\n> In PostgreSQL 14, The default value of shared_buffers is 128MB, but in postgresql.conf.sample, the default value of shared_buffers is 32MB.\n> I think the following changes should be made.\n\n> File: postgresql\\src\\backend\\utils\\misc\\ postgresql.conf.sample\n> #shared_buffers = 32MB => #shared_buffers = 128MB\n\nAs submitted, this patch breaks initdb, which is looking for the exact\nstring \"#shared_buffers = 32MB\".\n\nWe could adjust that too of course, but I'm dubious first that any\nchange is needed, and second that this is the right one:\n\n1. 
Since initdb will replace that string, users will never see this\nentry as-is in live databases. So is it worth doing anything?\n\n2. The *actual*, hard-wired, default in guc.c is 1024 (8MB), not\neither of these numbers. So maybe the sample file ought to use\nthat instead. Or maybe we should change that value too ... it's\nsurely as obsolete as can be.\n\nOn the whole this seems pretty cosmetic so I'm inclined to leave\nit alone. But if we were going to do anything I think that\nadjusting both initdb.c and guc.c to use 128MB might be the\nmost appropriate thing.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 24 Jun 2021 11:49:32 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [Patch] change the default value of shared_buffers in\n postgresql.conf.sample" }, { "msg_contents": "> On 24 Jun 2021, at 17:49, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n\n> ..if we were going to do anything I think that adjusting both initdb.c and\n> guc.c to use 128MB might be the most appropriate thing.\n\n\nEnsuring consistency doesn't seem like a bad thing in itself, even if it in\npractice won't make much difference.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Thu, 24 Jun 2021 17:56:39 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: [Patch] change the default value of shared_buffers in\n postgresql.conf.sample" }, { "msg_contents": "> On the whole this seems pretty cosmetic so I'm inclined to leave it alone. But if we were going to do anything I think that adjusting both initdb.c and guc.c to use 128MB might be the most appropriate thing.\r\n\r\nThank you for your suggestions. 
initdb.c and guc.c have been modified together.\r\n\r\nBest Regards!\r\nZhangjie\r\n\r\n-----Original Message-----\r\nFrom: Tom Lane <tgl@sss.pgh.pa.us> \r\nSent: Thursday, June 24, 2021 11:50 PM\r\nTo: Zhang, Jie/张 杰 <zhangjie2@fujitsu.com>\r\nCc: pgsql-hackers@lists.postgresql.org\r\nSubject: Re: [Patch] change the default value of shared_buffers in postgresql.conf.sample\r\n\r\n\"zhangjie2@fujitsu.com\" <zhangjie2@fujitsu.com> writes:\r\n> In PostgreSQL 14, The default value of shared_buffers is 128MB, but in postgresql.conf.sample, the default value of shared_buffers is 32MB.\r\n> I think the following changes should be made.\r\n\r\n> File: postgresql\\src\\backend\\utils\\misc\\ postgresql.conf.sample \r\n> #shared_buffers = 32MB => #shared_buffers = 128MB\r\n\r\nAs submitted, this patch breaks initdb, which is looking for the exact string \"#shared_buffers = 32MB\".\r\n\r\nWe could adjust that too of course, but I'm dubious first that any change is needed, and second that this is the right one:\r\n\r\n1. Since initdb will replace that string, users will never see this entry as-is in live databases. So is it worth doing anything?\r\n\r\n2. The *actual*, hard-wired, default in guc.c is 1024 (8MB), not either of these numbers. So maybe the sample file ought to use that instead. Or maybe we should change that value too ... it's surely as obsolete as can be.\r\n\r\nOn the whole this seems pretty cosmetic so I'm inclined to leave it alone. 
But if we were going to do anything I think that adjusting both initdb.c and guc.c to use 128MB might be the most appropriate thing.\r\n\r\n\t\t\tregards, tom lane", "msg_date": "Fri, 25 Jun 2021 06:15:59 +0000", "msg_from": "\"zhangjie2@fujitsu.com\" <zhangjie2@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: [Patch] change the default value of shared_buffers in\n postgresql.conf.sample" }, { "msg_contents": "On Thu, Jun 24, 2021 at 5:49 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> \"zhangjie2@fujitsu.com\" <zhangjie2@fujitsu.com> writes:\n> > In PostgreSQL 14, The default value of shared_buffers is 128MB, but in postgresql.conf.sample, the default value of shared_buffers is 32MB.\n> > I think the following changes should be made.\n>\n> > File: postgresql\\src\\backend\\utils\\misc\\ postgresql.conf.sample\n> > #shared_buffers = 32MB => #shared_buffers = 128MB\n>\n> As submitted, this patch breaks initdb, which is looking for the exact\n> string \"#shared_buffers = 32MB\".\n>\n> We could adjust that too of course, but I'm dubious first that any\n> change is needed, and second that this is the right one:\n>\n> 1. Since initdb will replace that string, users will never see this\n> entry as-is in live databases. So is it worth doing anything?\n\nIt's not entirely uncommon that users copy the .sample file into their\nconfiguration management system and then generate the real config from\nthat using templates. These users will definitely see it (and\noverwrite it).\n\n\n> 2. The *actual*, hard-wired, default in guc.c is 1024 (8MB), not\n> either of these numbers. So maybe the sample file ought to use\n> that instead. Or maybe we should change that value too ... it's\n> surely as obsolete as can be.\n\n+1 for changing this one as well. It'a always been slightly confusing,\nsince it's what shows up in pg_settings. 
If anything I'd consider that\nan oversight when the defaults were changed back then...\n\n\n> On the whole this seems pretty cosmetic so I'm inclined to leave\n> it alone. But if we were going to do anything I think that\n> adjusting both initdb.c and guc.c to use 128MB might be the\n> most appropriate thing.\n\nIt is mostly cosmetic, but it is cosmetic at a level that can cause at\nleast a small amount of confusion for users, so I'm definitely +1 for\ncleaning it up.\n\n-- \n Magnus Hagander\n Me: https://www.hagander.net/\n Work: https://www.redpill-linpro.com/\n\n\n", "msg_date": "Tue, 29 Jun 2021 09:28:04 +0200", "msg_from": "Magnus Hagander <magnus@hagander.net>", "msg_from_op": false, "msg_subject": "Re: [Patch] change the default value of shared_buffers in\n postgresql.conf.sample" }, { "msg_contents": "On Tue, Jun 29, 2021 at 09:28:04AM +0200, Magnus Hagander wrote:\n> > 1. Since initdb will replace that string, users will never see this\n> > entry as-is in live databases. So is it worth doing anything?\n> \n> It's not entirely uncommon that users copy the .sample file into their\n> configuration management system and then generate the real config from\n> that using templates. These users will definitely see it (and\n> overwrite it).\n> \n> \n> > 2. The *actual*, hard-wired, default in guc.c is 1024 (8MB), not\n> > either of these numbers. So maybe the sample file ought to use\n> > that instead. Or maybe we should change that value too ... it's\n> > surely as obsolete as can be.\n> \n> +1 for changing this one as well. It'a always been slightly confusing,\n> since it's what shows up in pg_settings. If anything I'd consider that\n> an oversight when the defaults were changed back then...\n> \n> \n> > On the whole this seems pretty cosmetic so I'm inclined to leave\n> > it alone. 
But if we were going to do anything I think that\n> > adjusting both initdb.c and guc.c to use 128MB might be the\n> > most appropriate thing.\n> \n> It is mostly cosmetic, but it is cosmetic at a level that can cause at\n> least a small amount of confusion for users, so I'm definitely +1 for\n> cleaning it up.\n\nYes, I liked this patch from June for the reasons outlined above. I\ndon't think there is any logic to why pg_settings shows 8MB and\npostgresql.conf.sample has 32MB, and this has been true since PG 10.\n\nI think the only question is whether this is a PG 15-only patch or a\npatckpatch to PG 10; I am in favor of the later.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Wed, 18 Aug 2021 13:48:44 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: [Patch] change the default value of shared_buffers in\n postgresql.conf.sample" }, { "msg_contents": "Bruce Momjian <bruce@momjian.us> writes:\n> I think the only question is whether this is a PG 15-only patch or a\n> patckpatch to PG 10; I am in favor of the later.\n\nI think you need a lot stronger argument that this is a bug\nbefore you consider back-patching user-visible behavioral\nchanges.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 18 Aug 2021 14:03:56 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [Patch] change the default value of shared_buffers in\n postgresql.conf.sample" }, { "msg_contents": "On Wed, Aug 18, 2021 at 02:03:56PM -0400, Tom Lane wrote:\n> Bruce Momjian <bruce@momjian.us> writes:\n> > I think the only question is whether this is a PG 15-only patch or a\n> > patckpatch to PG 10; I am in favor of the later.\n> \n> I think you need a lot stronger argument that this is a bug\n> before you consider back-patching user-visible behavioral\n> changes.\n\nI think the 
only logic to backpatching it is your statement that this is\ncosmetic, and the new cosmetic appearance is more accurate. However, if\nyou don't feel we need to backpatch, that is fine with me --- we have\ngotten very few complaints about this.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Wed, 18 Aug 2021 14:16:55 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: [Patch] change the default value of shared_buffers in\n postgresql.conf.sample" }, { "msg_contents": "On Wed, Aug 18, 2021 at 8:16 PM Bruce Momjian <bruce@momjian.us> wrote:\n>\n> On Wed, Aug 18, 2021 at 02:03:56PM -0400, Tom Lane wrote:\n> > Bruce Momjian <bruce@momjian.us> writes:\n> > > I think the only question is whether this is a PG 15-only patch or a\n> > > patckpatch to PG 10; I am in favor of the later.\n> >\n> > I think you need a lot stronger argument that this is a bug\n> > before you consider back-patching user-visible behavioral\n> > changes.\n>\n> I think the only logic to backpatching it is your statement that this is\n> cosmetic, and the new cosmetic appearance is more accurate. 
However, if\n> you don't feel we need to backpatch, that is fine with me --- we have\n> gotten very few complaints about this.\n\n+1 for making the change ,and +1 for making it in master only, no backpatch.\n\n-- \n Magnus Hagander\n Me: https://www.hagander.net/\n Work: https://www.redpill-linpro.com/\n\n\n", "msg_date": "Wed, 18 Aug 2021 20:27:19 +0200", "msg_from": "Magnus Hagander <magnus@hagander.net>", "msg_from_op": false, "msg_subject": "Re: [Patch] change the default value of shared_buffers in\n postgresql.conf.sample" }, { "msg_contents": "On Wed, Aug 18, 2021 at 08:27:19PM +0200, Magnus Hagander wrote:\n> On Wed, Aug 18, 2021 at 8:16 PM Bruce Momjian <bruce@momjian.us> wrote:\n> >\n> > On Wed, Aug 18, 2021 at 02:03:56PM -0400, Tom Lane wrote:\n> > > Bruce Momjian <bruce@momjian.us> writes:\n> > > > I think the only question is whether this is a PG 15-only patch or a\n> > > > patckpatch to PG 10; I am in favor of the later.\n> > >\n> > > I think you need a lot stronger argument that this is a bug\n> > > before you consider back-patching user-visible behavioral\n> > > changes.\n> >\n> > I think the only logic to backpatching it is your statement that this is\n> > cosmetic, and the new cosmetic appearance is more accurate. However, if\n> > you don't feel we need to backpatch, that is fine with me --- we have\n> > gotten very few complaints about this.\n> \n> +1 for making the change ,and +1 for making it in master only, no backpatch.\n\nPatch applied to master.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Mon, 23 Aug 2021 12:33:52 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: [Patch] change the default value of shared_buffers in\n postgresql.conf.sample" } ]
[ { "msg_contents": "As many will be aware, there is a syntactic ambiguity in the SQL \nstandard regarding the keyword UNBOUNDED. Since UNBOUNDED is a \nnon-reserved word, it could be the name of a function parameter and be \nused as an expression. There is a grammar hack to resolve such cases as \nthe keyword.\n\nI brought this issue to the SQL standard working group, and a fix has \nbeen agreed. (Since long-standing syntax obviously can't be changed, \nthe fix is basically just an additional rule saying, \"if you see this, \nit means the keyword\".) While working on that, I wrote a few test cases \nto explore this and check how PostgreSQL actually handles this. I \nfigure these test cases are worth committing so that we have a record of \nthis and future grammar refactorings can maintain the behavior.", "msg_date": "Thu, 24 Jun 2021 11:01:32 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Add tests for UNBOUNDED syntax ambiguity" }, { "msg_contents": "On 24/06/2021 12:01, Peter Eisentraut wrote:\n> As many will be aware, there is a syntactic ambiguity in the SQL\n> standard regarding the keyword UNBOUNDED. Since UNBOUNDED is a\n> non-reserved word, it could be the name of a function parameter and be\n> used as an expression. There is a grammar hack to resolve such cases as\n> the keyword.\n> \n> I brought this issue to the SQL standard working group, and a fix has\n> been agreed. (Since long-standing syntax obviously can't be changed,\n> the fix is basically just an additional rule saying, \"if you see this,\n> it means the keyword\".)\n\nNice!\n\n> While working on that, I wrote a few test cases to explore this and\n> check how PostgreSQL actually handles this. 
I figure these test\n> cases are worth committing so that we have a record of this and\n> future grammar refactorings can maintain the behavior.\n\n+1\n\n- Heikki\n\n\n", "msg_date": "Thu, 24 Jun 2021 16:42:20 +0300", "msg_from": "Heikki Linnakangas <hlinnaka@iki.fi>", "msg_from_op": false, "msg_subject": "Re: Add tests for UNBOUNDED syntax ambiguity" } ]
[ { "msg_contents": "Hi hackers,\n\nWe may have anti-joins in several cases. Sublinks of 'NOT EXISTS' may be\npulled up as anti-joins. Left joins whose join quals are strict for any\nnullable var that is forced null by higher qual levels will also be\nreduced to anti-joins. So anti-joins are very commonly used in practice.\n\nCurrently when populating anti-join with paths, we do not try to swap\nthe outer and inner to get both paths. That may make us miss some\ncheaper paths.\n\n# insert into foo select i, i from generate_series(1,10)i;\nINSERT 0 10\n\n# insert into bar select i, i from generate_series(1,5000000)i;\nINSERT 0 5000000\n\n# explain select * from foo left join bar on foo.a = bar.c where bar.c is\nnull;\n QUERY PLAN\n-------------------------------------------------------------------------\n Hash Anti Join (cost=154156.00..173691.19 rows=1 width=16)\n Hash Cond: (foo.a = bar.c)\n -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8)\n -> Hash (cost=72124.00..72124.00 rows=5000000 width=8)\n -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=8)\n(5 rows)\n\nI believe if we use the smaller table 'foo' as inner side for this\nquery, we would have a cheaper plan.\n\nSo I'm wondering whether it's worthwhile to use each rel as both outer\nand inner for anti-joins, maybe by inventing a JOIN_REVERSE_ANTI join\ntype.\n\nThanks\nRichard\n\nHi hackers,We may have anti-joins in several cases. Sublinks of 'NOT EXISTS' may bepulled up as anti-joins. Left joins whose join quals are strict for anynullable var that is forced null by higher qual levels will also bereduced to anti-joins. So anti-joins are very commonly used in practice.Currently when populating anti-join with paths, we do not try to swapthe outer and inner to get both paths. 
That may make us miss somecheaper paths.# insert into foo select i, i from generate_series(1,10)i;INSERT 0 10# insert into bar select i, i from generate_series(1,5000000)i;INSERT 0 5000000# explain select * from foo left join bar on foo.a = bar.c where bar.c is null;                               QUERY PLAN------------------------------------------------------------------------- Hash Anti Join  (cost=154156.00..173691.19 rows=1 width=16)   Hash Cond: (foo.a = bar.c)   ->  Seq Scan on foo  (cost=0.00..1.10 rows=10 width=8)   ->  Hash  (cost=72124.00..72124.00 rows=5000000 width=8)         ->  Seq Scan on bar  (cost=0.00..72124.00 rows=5000000 width=8)(5 rows)I believe if we use the smaller table 'foo' as inner side for thisquery, we would have a cheaper plan.So I'm wondering whether it's worthwhile to use each rel as both outerand inner for anti-joins, maybe by inventing a JOIN_REVERSE_ANTI jointype.ThanksRichard", "msg_date": "Thu, 24 Jun 2021 17:50:41 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On 24/06/2021 12:50, Richard Guo wrote:\n> Hi hackers,\n> \n> We may have anti-joins in several cases. Sublinks of 'NOT EXISTS' may be\n> pulled up as anti-joins. Left joins whose join quals are strict for any\n> nullable var that is forced null by higher qual levels will also be\n> reduced to anti-joins. So anti-joins are very commonly used in practice.\n> \n> Currently when populating anti-join with paths, we do not try to swap\n> the outer and inner to get both paths. 
That may make us miss some\n> cheaper paths.\n> \n> # insert into foo select i, i from generate_series(1,10)i;\n> INSERT 0 10\n> \n> # insert into bar select i, i from generate_series(1,5000000)i;\n> INSERT 0 5000000\n> \n> # explain select * from foo left join bar on foo.a = bar.c where bar.c \n> is null;\n>                                QUERY PLAN\n> -------------------------------------------------------------------------\n>  Hash Anti Join  (cost=154156.00..173691.19 rows=1 width=16)\n>    Hash Cond: (foo.a = bar.c)\n>    ->  Seq Scan on foo  (cost=0.00..1.10 rows=10 width=8)\n>    ->  Hash  (cost=72124.00..72124.00 rows=5000000 width=8)\n>          ->  Seq Scan on bar  (cost=0.00..72124.00 rows=5000000 width=8)\n> (5 rows)\n> \n> I believe if we use the smaller table 'foo' as inner side for this\n> query, we would have a cheaper plan.\n\nHow would that work?\n\n- Heikki\n\n\n", "msg_date": "Thu, 24 Jun 2021 16:28:48 +0300", "msg_from": "Heikki Linnakangas <hlinnaka@iki.fi>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "Heikki Linnakangas <hlinnaka@iki.fi> writes:\n> On 24/06/2021 12:50, Richard Guo wrote:\n>> I believe if we use the smaller table 'foo' as inner side for this\n>> query, we would have a cheaper plan.\n\n> How would that work?\n\nI think you could make it work for the hash-join case by extending\nthe existing mechanism for right joins: emit nothing during the main\nscan, but mark hashtable entries when a match is found. Then make\na post-pass and emit hash entries that never found a match. 
So\nbasically just the inverse behavior of a right join, but with the\nsame state info.\n\nMerge join could likely support \"right anti join\" too, though the\nbenefit of swapping inputs tends to be small there, so it may not be\nworth doing.\n\nObviously there's a pretty fair amount of code to write to make this\nhappen.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 24 Jun 2021 10:14:52 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Thu, Jun 24, 2021 at 10:14 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n\n> Heikki Linnakangas <hlinnaka@iki.fi> writes:\n> > On 24/06/2021 12:50, Richard Guo wrote:\n> >> I believe if we use the smaller table 'foo' as inner side for this\n> >> query, we would have a cheaper plan.\n>\n> > How would that work?\n>\n> I think you could make it work for the hash-join case by extending\n> the existing mechanism for right joins: emit nothing during the main\n> scan, but mark hashtable entries when a match is found. Then make\n> a post-pass and emit hash entries that never found a match. So\n> basically just the inverse behavior of a right join, but with the\n> same state info.\n>\n> Merge join could likely support \"right anti join\" too, though the\n> benefit of swapping inputs tends to be small there, so it may not be\n> worth doing.\n>\n> Obviously there's a pretty fair amount of code to write to make this\n> happen.\n>\n\nThanks for the explanation. Attached is a demo code for the hash-join\ncase, which is only for PoC to show how we can make it work. 
It's far\nfrom complete, at least we need to adjust the cost calculation for this\n'right anti join'.\n\nAm I going in the right direction?\n\nThanks\nRichard", "msg_date": "Sat, 26 Jun 2021 18:48:43 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "> Thanks for the explanation. Attached is a demo code for the hash-join\n> case, which is only for PoC to show how we can make it work. It's far\n> from complete, at least we need to adjust the cost calculation for this\n> 'right anti join'.\n\nI applied the patch and executed some queries. Hash Right Anti Joins\nseem to be working correctly. Though, some of the tests are failing.\nI guessed it's because the other join algorithms do not support right\nanti join, but I couldn't reproduce it.\n\nI am impressed by how simple the patch is: only 2 lines to support a\nnew join algorithm. This is a good case for the quality of Postgres\ncode. I hope supporting the other join algorithms would be similar.\n\nI am not sure how the cost estimation should differ from straight anti\njoin. It seems to me that the planner is already making the right\nchoice by taking into account the cost of the Hash node which makes\nthe whole cost greater when the inner table is much bigger.\n\nI am not an expert planner, but it feels to me like a good feature\nthat can provide better plans in some cases. Given it works correctly\nand the implementation is so simple, the only argument against it may\nbe increased planning time. I know that the planner performance is\naffected negatively by the number of join paths to consider. 
This may\nnot be a bigger deal as typically there are not many anti joins in a\nquery, but it'd still be a good idea to do some performance tests.\n\n\n", "msg_date": "Tue, 29 Jun 2021 10:55:17 +0300", "msg_from": "Emre Hasegeli <emre@hasegeli.com>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Tue, Jun 29, 2021 at 3:55 PM Emre Hasegeli <emre@hasegeli.com> wrote:\n\n> > Thanks for the explanation. Attached is a demo code for the hash-join\n> > case, which is only for PoC to show how we can make it work. It's far\n> > from complete, at least we need to adjust the cost calculation for this\n> > 'right anti join'.\n>\n> I applied the patch and executed some queries. Hash Right Anti Joins\n> seem to be working correctly. Though, some of the tests are failing.\n> I guessed it's because the other join algorithms do not support right\n> anti join, but I couldn't reproduce it.\n>\n\nThanks for verifying this patch.\n\n\n>\n> I am impressed by how simple the patch is: only 2 lines to support a\n> new join algorithm. This is a good case for the quality of Postgres\n> code. I hope supporting the other join algorithms would be similar.\n>\n\nYes, thanks to the excellent design pattern of the execution codes, we\nonly need very few changes to support this new join type.\n\n\n>\n> I am not sure how the cost estimation should differ from straight anti\n> join. It seems to me that the planner is already making the right\n> choice by taking into account the cost of the Hash node which makes\n> the whole cost greater when the inner table is much bigger.\n>\n\nI think we can basically use the same cost calculation as with anti\njoins, since they share the fact that the executor will stop after the\nfirst match. However, there are still some differences. 
Such as when we\nconsider the number of tuples that will pass the basic join, I think we\nneed to use unmatched inner rows, rather than unmatched outer rows.\n\n\n>\n> I am not an expert planner, but it feels to me like a good feature\n> that can provide better plans in some cases. Given it works correctly\n> and the implementation is so simple, the only argument against it may\n> be increased planning time. I know that the planner performance is\n> affected negatively by the number of join paths to consider. This may\n> not be a bigger deal as typically there are not many anti joins in a\n> query, but it'd still be a good idea to do some performance tests.\n>\n\nAgree. Performance tests are necessary if we consider finishing this\npatch.\n\nThanks\nRichard\n\nOn Tue, Jun 29, 2021 at 3:55 PM Emre Hasegeli <emre@hasegeli.com> wrote:> Thanks for the explanation. Attached is a demo code for the hash-join\n> case, which is only for PoC to show how we can make it work. It's far\n> from complete, at least we need to adjust the cost calculation for this\n> 'right anti join'.\n\nI applied the patch and executed some queries.  Hash Right Anti Joins\nseem to be working correctly.  Though, some of the tests are failing.\nI guessed it's because the other join algorithms do not support right\nanti join, but I couldn't reproduce it.Thanks for verifying this patch. \n\nI am impressed by how simple the patch is: only 2 lines to support a\nnew join algorithm.  This is a good case for the quality of Postgres\ncode.  I hope supporting the other join algorithms would be similar.Yes, thanks to the excellent design pattern of the execution codes, weonly need very few changes to support this new join type. \n\nI am not sure how the cost estimation should differ from straight anti\njoin.  
It seems to me that the planner is already making the right\nchoice by taking into account the cost of the Hash node which makes\nthe whole cost greater when the inner table is much bigger.I think we can basically use the same cost calculation as with antijoins, since they share the fact that the executor will stop after thefirst match. However, there are still some differences. Such as when weconsider the number of tuples that will pass the basic join, I think weneed to use unmatched inner rows, rather than unmatched outer rows. \n\nI am not an expert planner, but it feels to me like a good feature\nthat can provide better plans in some cases.  Given it works correctly\nand the implementation is so simple, the only argument against it may\nbe increased planning time.  I know that the planner performance is\naffected negatively by the number of join paths to consider.  This may\nnot be a bigger deal as typically there are not many anti joins in a\nquery, but it'd still be a good idea to do some performance tests.Agree. Performance tests are necessary if we consider finishing thispatch. ThanksRichard", "msg_date": "Tue, 29 Jun 2021 16:55:59 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "Le mardi 29 juin 2021, 10:55:59 CEST Richard Guo a écrit :\n> On Tue, Jun 29, 2021 at 3:55 PM Emre Hasegeli <emre@hasegeli.com> wrote:\n> > > Thanks for the explanation. Attached is a demo code for the hash-join\n> > > case, which is only for PoC to show how we can make it work. It's far\n> > > from complete, at least we need to adjust the cost calculation for this\n> > > 'right anti join'.\n> > \n> > I applied the patch and executed some queries. Hash Right Anti Joins\n> > seem to be working correctly. 
Though, some of the tests are failing.\n> > I guessed it's because the other join algorithms do not support right\n> > anti join, but I couldn't reproduce it.\n> \n> Thanks for verifying this patch.\n\nI also ran the tests on this patch, and can confirm the tests are failing.\n\nThe reason for that is that you request a new outer tuple whenever we have a \nmatch, even when the outer tuple could match several tuples from the hash \ntable: we end up emitting the duplicates because we switched to another tuple \nafter the first match.\n\nYou can set up a simple test case like this:\n\ncreate table t1 (id int);\ncreate table t2 (id int);\ninsert into t1 select generate_series (1, 10000);\ninsert into t2 VALUES (1), (1), (-1), (-1);\nanalyze t1, t2;\n\nset enable_hashjoin = off;\nexplain (analyze) select * from t2 where not exists (SELECT 1 FROM t1 where \nt1.id = t2.id);\nset enable_nestloop = off;\nset enable_hashjoin = on;\nexplain (analyze) select * from t2 where not exists (SELECT 1 FROM t1 where \nt1.id = t2.id);\n\nAlso, I'm not familiar enough with the hash join algorithm to know if the \napproach of \"scanning every outer tuple, skip emitting matching inner tuples\" \nwould be correct, but this is the first problem I notice. Not getting into the \nHJ_NEED_NEW_OUTER state when the join type is JOIN_RIGHT_ANTI seems to fix this \nspecific issue tough.\n\t\n> I think we can basically use the same cost calculation as with anti\n> joins, since they share the fact that the executor will stop after the\n> first match. However, there are still some differences. 
Such as when we\n> consider the number of tuples that will pass the basic join, I think we\n> need to use unmatched inner rows, rather than unmatched outer rows.\n\nDue to the fact we cannot just skip at the first match, I'm not sure this works \neither.\n\n> \n> Thanks\n> Richard\n\n\n\n\n\n\n", "msg_date": "Tue, 29 Jun 2021 16:40:57 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Tue, Jun 29, 2021 at 10:41 PM Ronan Dunklau <ronan.dunklau@aiven.io>\nwrote:\n\n> Le mardi 29 juin 2021, 10:55:59 CEST Richard Guo a écrit :\n> > On Tue, Jun 29, 2021 at 3:55 PM Emre Hasegeli <emre@hasegeli.com> wrote:\n> > > > Thanks for the explanation. Attached is a demo code for the hash-join\n> > > > case, which is only for PoC to show how we can make it work. It's far\n> > > > from complete, at least we need to adjust the cost calculation for\n> this\n> > > > 'right anti join'.\n> > >\n> > > I applied the patch and executed some queries. Hash Right Anti Joins\n> > > seem to be working correctly. Though, some of the tests are failing.\n> > > I guessed it's because the other join algorithms do not support right\n> > > anti join, but I couldn't reproduce it.\n> >\n> > Thanks for verifying this patch.\n>\n> I also ran the tests on this patch, and can confirm the tests are failing.\n>\n> The reason for that is that you request a new outer tuple whenever we have\n> a\n> match, even when the outer tuple could match several tuples from the hash\n> table: we end up emitting the duplicates because we switched to another\n> tuple\n> after the first match.\n>\n\nYes, thanks! I was making a big mistake here thinking the executor can\nstop after the first match. That's not true. We need to use each outer\ntuple to find all the matches and mark the corresponding hashtable\nentries. 
I have updated the patch with the fix.\n\n\n> > I think we can basically use the same cost calculation as with anti\n> > joins, since they share the fact that the executor will stop after the\n> > first match. However, there are still some differences. Such as when we\n> > consider the number of tuples that will pass the basic join, I think we\n> > need to use unmatched inner rows, rather than unmatched outer rows.\n>\n> Due to the fact we cannot just skip at the first match, I'm not sure this\n> works\n> either.\n>\n\nThis is not correct any more since the fact that the executor will stop\nafter the first match does not hold true. A brief thought show me that\nwe can use the same cost calculation as with right joins.\n\nThanks\nRichard", "msg_date": "Wed, 30 Jun 2021 12:05:53 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "> Yes, thanks! I was making a big mistake here thinking the executor can\n> stop after the first match. That's not true. We need to use each outer\n> tuple to find all the matches and mark the corresponding hashtable\n> entries. I have updated the patch with the fix.\n\nIt looks OK to me.\n\n> > > I think we can basically use the same cost calculation as with anti\n> > > joins, since they share the fact that the executor will stop after the\n> > > first match. However, there are still some differences. Such as when we\n> > > consider the number of tuples that will pass the basic join, I think we\n> > > need to use unmatched inner rows, rather than unmatched outer rows.\n> > \n> > Due to the fact we cannot just skip at the first match, I'm not sure this\n> > works\n> > either.\n> \n> This is not correct any more since the fact that the executor will stop\n> after the first match does not hold true. 
A brief thought show me that\n> we can use the same cost calculation as with right joins.\n\nOnce you do that, you should also add test coverage for those new plans. Also \nconsider adding a commitfest entry.\n\nRegards,\n\n--\nRonan Dunklau\n\n\n\n\n\n", "msg_date": "Thu, 01 Jul 2021 09:09:38 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "Le jeudi 1 juillet 2021, 09:09:38 CEST Ronan Dunklau a écrit :\n> > Yes, thanks! I was making a big mistake here thinking the executor can\n> > stop after the first match. That's not true. We need to use each outer\n> > tuple to find all the matches and mark the corresponding hashtable\n> > entries. I have updated the patch with the fix.\n> \n> It looks OK to me.\n\nI forgot to mention: you also have failing tests due to the plans changing to \nuse the new join type. This might not be the case anymore once you update the \ncost model, but if that's the case the tests should be updated.\n\n\n\n\n", "msg_date": "Thu, 01 Jul 2021 09:18:37 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Thu, Jul 1, 2021 at 3:18 PM Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n\n> Le jeudi 1 juillet 2021, 09:09:38 CEST Ronan Dunklau a écrit :\n> > > Yes, thanks! I was making a big mistake here thinking the executor can\n> > > stop after the first match. That's not true. We need to use each outer\n> > > tuple to find all the matches and mark the corresponding hashtable\n> > > entries. I have updated the patch with the fix.\n> >\n> > It looks OK to me.\n>\n> I forgot to mention: you also have failing tests due to the plans changing\n> to\n> use the new join type. 
This might not be the case anymore once you update\n> the\n> cost model, but if that's the case the tests should be updated.\n>\n\nThanks! Test cases are updated in v3 patch. Also merge join can do the\n'right anti join' too in the same patch.\n\nThanks again for reviewing this patch.\n\nThanks\nRichard", "msg_date": "Fri, 2 Jul 2021 11:23:59 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Thu, Jul 1, 2021 at 8:24 PM Richard Guo <guofenglinux@gmail.com> wrote:\n\n>\n> On Thu, Jul 1, 2021 at 3:18 PM Ronan Dunklau <ronan.dunklau@aiven.io>\n> wrote:\n>\n>> Le jeudi 1 juillet 2021, 09:09:38 CEST Ronan Dunklau a écrit :\n>> > > Yes, thanks! I was making a big mistake here thinking the executor can\n>> > > stop after the first match. That's not true. We need to use each outer\n>> > > tuple to find all the matches and mark the corresponding hashtable\n>> > > entries. I have updated the patch with the fix.\n>> >\n>> > It looks OK to me.\n>>\n>> I forgot to mention: you also have failing tests due to the plans\n>> changing to\n>> use the new join type. This might not be the case anymore once you update\n>> the\n>> cost model, but if that's the case the tests should be updated.\n>>\n>\n> Thanks! Test cases are updated in v3 patch. Also merge join can do the\n> 'right anti join' too in the same patch.\n>\n> Thanks again for reviewing this patch.\n>\n> Thanks\n> Richard\n>\n>\nHi,\nMinor comment:\n+ * In a right-antijoin, we never return a matched tuple.\n\nmatched tuple -> matching tuple\n\nCheers\n\nOn Thu, Jul 1, 2021 at 8:24 PM Richard Guo <guofenglinux@gmail.com> wrote:On Thu, Jul 1, 2021 at 3:18 PM Ronan Dunklau <ronan.dunklau@aiven.io> wrote:Le jeudi 1 juillet 2021, 09:09:38 CEST Ronan Dunklau a écrit :\n> > Yes, thanks! I was making a big mistake here thinking the executor can\n> > stop after the first match. That's not true. 
We need to use each outer\n> > tuple to find all the matches and mark the corresponding hashtable\n> > entries. I have updated the patch with the fix.\n> \n> It looks OK to me.\n\nI forgot to mention: you also have failing tests due to the plans changing to \nuse the new join type. This might not be the case anymore once you update the \ncost model, but if that's the case the tests should be updated.Thanks! Test cases are updated in v3 patch. Also merge join can do the'right anti join' too in the same patch.Thanks again for reviewing this patch.ThanksRichard Hi,Minor comment:+                    * In a right-antijoin, we never return a matched tuple.matched tuple -> matching tupleCheers", "msg_date": "Thu, 1 Jul 2021 21:04:47 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Fri, Jul 2, 2021 at 11:59 AM Zhihong Yu <zyu@yugabyte.com> wrote:\n\n>\n>\n> On Thu, Jul 1, 2021 at 8:24 PM Richard Guo <guofenglinux@gmail.com> wrote:\n>\n>>\n>> On Thu, Jul 1, 2021 at 3:18 PM Ronan Dunklau <ronan.dunklau@aiven.io>\n>> wrote:\n>>\n>>> Le jeudi 1 juillet 2021, 09:09:38 CEST Ronan Dunklau a écrit :\n>>> > > Yes, thanks! I was making a big mistake here thinking the executor\n>>> can\n>>> > > stop after the first match. That's not true. We need to use each\n>>> outer\n>>> > > tuple to find all the matches and mark the corresponding hashtable\n>>> > > entries. I have updated the patch with the fix.\n>>> >\n>>> > It looks OK to me.\n>>>\n>>> I forgot to mention: you also have failing tests due to the plans\n>>> changing to\n>>> use the new join type. This might not be the case anymore once you\n>>> update the\n>>> cost model, but if that's the case the tests should be updated.\n>>>\n>>\n>> Thanks! Test cases are updated in v3 patch. 
Also merge join can do the\n>> 'right anti join' too in the same patch.\n>>\n>> Thanks again for reviewing this patch.\n>>\n>> Thanks\n>> Richard\n>>\n>>\n> Hi,\n> Minor comment:\n> + * In a right-antijoin, we never return a matched\n> tuple.\n>\n> matched tuple -> matching tuple\n>\n\nThanks for reviewing.\n\nThe comment for JOIN_ANTI in ExecHashJoinImpl/ExecMergeJoin is:\n\n\"In an antijoin, we never return a matched tuple\"\n\nSo I think we'd better keep it consistent for JOIN_RIGHT_ANTI?\n\nThanks\nRichard\n\nOn Fri, Jul 2, 2021 at 11:59 AM Zhihong Yu <zyu@yugabyte.com> wrote:On Thu, Jul 1, 2021 at 8:24 PM Richard Guo <guofenglinux@gmail.com> wrote:On Thu, Jul 1, 2021 at 3:18 PM Ronan Dunklau <ronan.dunklau@aiven.io> wrote:Le jeudi 1 juillet 2021, 09:09:38 CEST Ronan Dunklau a écrit :\n> > Yes, thanks! I was making a big mistake here thinking the executor can\n> > stop after the first match. That's not true. We need to use each outer\n> > tuple to find all the matches and mark the corresponding hashtable\n> > entries. I have updated the patch with the fix.\n> \n> It looks OK to me.\n\nI forgot to mention: you also have failing tests due to the plans changing to \nuse the new join type. This might not be the case anymore once you update the \ncost model, but if that's the case the tests should be updated.Thanks! Test cases are updated in v3 patch. 
Also merge join can do the'right anti join' too in the same patch.Thanks again for reviewing this patch.ThanksRichard Hi,Minor comment:+                    * In a right-antijoin, we never return a matched tuple.matched tuple -> matching tupleThanks for reviewing.The comment for JOIN_ANTI in ExecHashJoinImpl/ExecMergeJoin is:\"In an antijoin, we never return a matched tuple\"So I think we'd better keep it consistent for JOIN_RIGHT_ANTI?ThanksRichard", "msg_date": "Wed, 21 Jul 2021 17:07:28 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Fri, Jul 2, 2021 at 11:23 AM Richard Guo <guofenglinux@gmail.com> wrote:\n\n> Thanks! Test cases are updated in v3 patch. Also merge join can do the\n> 'right anti join' too in the same patch.\n>\n> Thanks again for reviewing this patch.\n>\n\nRebased this patch with latest master, with no other changes.\n\nThanks\nRichard", "msg_date": "Mon, 25 Jul 2022 10:48:33 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "Richard Guo <guofenglinux@gmail.com> writes:\n> [ v4-0001-Using-each-rel-as-both-outer-and-inner-for-anti-j.patch ]\n\nI took a quick look through this. The executor changes are indeed\nimpressively short, but that's largely because you've paid zero\nattention to updating obsoleted comments. For example, in\nnodeHashjoin.c there are lots of references to right/full joins\nthat likely now need to cover right-anti. 
I'm not sure that the\nempty-rel startup optimizations are correct for this case, either.\n\nI don't have a warm feeling about the planner changes being correct:\nit looks like what you mostly did was to treat JOIN_RIGHT_ANTI\nidentically to JOIN_ANTI everywhere, which is surely wrong.\nAs an example of this, optimizer/README mentions\n\n Similarly, parameterized paths do not normally get preference in add_path\n for having cheap startup cost; that's seldom of much value when on the\n inside of a nestloop, so it seems not worth keeping extra paths solely for\n that. An exception occurs for parameterized paths for the RHS relation of\n a SEMI or ANTI join: in those cases, we can stop the inner scan after the\n first match, so it's primarily startup not total cost that we care about.\n\nFor RIGHT_ANTI it'd become startup of the outer scan that counts, but\nI don't think you've gotten that right here.\n\nThere are various references to JOIN_ANTI in planner peripheral code,\ne.g. selfuncs.c, that probably need adjustment.\n\n[ wanders away wondering if JOIN_RIGHT_SEMI should become a thing ... ]\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 30 Jul 2022 12:07:38 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Sun, Jul 31, 2022 at 12:07 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n\n> Richard Guo <guofenglinux@gmail.com> writes:\n> > [ v4-0001-Using-each-rel-as-both-outer-and-inner-for-anti-j.patch ]\n>\n> I took a quick look through this. The executor changes are indeed\n> impressively short, but that's largely because you've paid zero\n> attention to updating obsoleted comments. For example, in\n> nodeHashjoin.c there are lots of references to right/full joins\n> that likely now need to cover right-anti. I'm not sure that the\n> empty-rel startup optimizations are correct for this case, either.\n\n\nThanks for the review! Yeah, you're right. 
I neglected to update the\nrelated comments. Will do that in the new patch. For the empty-rel\nstartup optimizations, since the right-anti join also does null-fill on\ninner relation (the HJ_FILL_INNER case), I think we cannot skip building\nthe hash table even when the outer rel is completely empty.\n\n\n> I don't have a warm feeling about the planner changes being correct:\n> it looks like what you mostly did was to treat JOIN_RIGHT_ANTI\n> identically to JOIN_ANTI everywhere, which is surely wrong.\n> As an example of this, optimizer/README mentions\n>\n> Similarly, parameterized paths do not normally get preference in add_path\n> for having cheap startup cost; that's seldom of much value when on the\n> inside of a nestloop, so it seems not worth keeping extra paths solely\n> for\n> that. An exception occurs for parameterized paths for the RHS relation\n> of\n> a SEMI or ANTI join: in those cases, we can stop the inner scan after the\n> first match, so it's primarily startup not total cost that we care about.\n>\n> For RIGHT_ANTI it'd become startup of the outer scan that counts, but\n> I don't think you've gotten that right here.\n\n\nI think JOIN_RIGHT_ANTI behaves more like JOIN_RIGHT, except that\nJOIN_RIGHT returns a matched tuple while JOIN_RIGHT_ANTI does not. For\neach outer tuple, right-anti needs to scan the inner rel for every match\nand mark its hashtable entry. So I think the right-anti join should not\nbelong to the case 'in those cases, we can stop the inner scan after the\nfirst match, so it's primarily startup not total cost that we care\nabout.' Am I thinking it correctly?\n\n\n> [ wanders away wondering if JOIN_RIGHT_SEMI should become a thing ... ]\n\n\nMaybe this is something we can do. 
Currently for the query below:\n\n# explain select * from foo where a in (select c from bar);\n QUERY PLAN\n-------------------------------------------------------------------------\n Hash Semi Join (cost=154156.00..173691.29 rows=10 width=8)\n Hash Cond: (foo.a = bar.c)\n -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8)\n -> Hash (cost=72124.00..72124.00 rows=5000000 width=4)\n -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=4)\n(5 rows)\n\nI believe we can get a cheaper plan if we are able to swap the outer and\ninner for SEMI JOIN and use the smaller 'foo' as inner rel.\n\nThanks\nRichard\n\nOn Sun, Jul 31, 2022 at 12:07 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:Richard Guo <guofenglinux@gmail.com> writes:\n> [ v4-0001-Using-each-rel-as-both-outer-and-inner-for-anti-j.patch ]\n\nI took a quick look through this.  The executor changes are indeed\nimpressively short, but that's largely because you've paid zero\nattention to updating obsoleted comments.  For example, in\nnodeHashjoin.c there are lots of references to right/full joins\nthat likely now need to cover right-anti.  I'm not sure that the\nempty-rel startup optimizations are correct for this case, either.Thanks for the review! Yeah, you're right. I neglected to update therelated comments. Will do that in the new patch. For the empty-relstartup optimizations, since the right-anti join also does null-fill oninner relation (the HJ_FILL_INNER case), I think we cannot skip buildingthe hash table even when the outer rel is completely empty. 
\nI don't have a warm feeling about the planner changes being correct:\nit looks like what you mostly did was to treat JOIN_RIGHT_ANTI\nidentically to JOIN_ANTI everywhere, which is surely wrong.\nAs an example of this, optimizer/README mentions\n\n  Similarly, parameterized paths do not normally get preference in add_path\n  for having cheap startup cost; that's seldom of much value when on the\n  inside of a nestloop, so it seems not worth keeping extra paths solely for\n  that.  An exception occurs for parameterized paths for the RHS relation of\n  a SEMI or ANTI join: in those cases, we can stop the inner scan after the\n  first match, so it's primarily startup not total cost that we care about.\n\nFor RIGHT_ANTI it'd become startup of the outer scan that counts, but\nI don't think you've gotten that right here.I think JOIN_RIGHT_ANTI behaves more like JOIN_RIGHT, except thatJOIN_RIGHT returns a matched tuple while JOIN_RIGHT_ANTI does not. Foreach outer tuple, right-anti needs to scan the inner rel for every matchand mark its hashtable entry. So I think the right-anti join should notbelong to the case 'in those cases, we can stop the inner scan after thefirst match, so it's primarily startup not total cost that we careabout.' Am I thinking it correctly? \n[ wanders away wondering if JOIN_RIGHT_SEMI should become a thing ... ] Maybe this is something we can do. 
Currently for the query below:# explain select * from foo where a in (select c from bar);                               QUERY PLAN------------------------------------------------------------------------- Hash Semi Join  (cost=154156.00..173691.29 rows=10 width=8)   Hash Cond: (foo.a = bar.c)   ->  Seq Scan on foo  (cost=0.00..1.10 rows=10 width=8)   ->  Hash  (cost=72124.00..72124.00 rows=5000000 width=4)         ->  Seq Scan on bar  (cost=0.00..72124.00 rows=5000000 width=4)(5 rows)I believe we can get a cheaper plan if we are able to swap the outer andinner for SEMI JOIN and use the smaller 'foo' as inner rel.ThanksRichard", "msg_date": "Tue, 2 Aug 2022 15:13:55 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Tue, Aug 2, 2022 at 3:13 PM Richard Guo <guofenglinux@gmail.com> wrote:\n\n> On Sun, Jul 31, 2022 at 12:07 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n>> I took a quick look through this. The executor changes are indeed\n>> impressively short, but that's largely because you've paid zero\n>> attention to updating obsoleted comments. For example, in\n>> nodeHashjoin.c there are lots of references to right/full joins\n>> that likely now need to cover right-anti. I'm not sure that the\n>> empty-rel startup optimizations are correct for this case, either.\n>\n>\n> Thanks for the review! Yeah, you're right. I neglected to update the\n> related comments. Will do that in the new patch. 
For the empty-rel\n> startup optimizations, since the right-anti join also does null-fill on\n> inner relation (the HJ_FILL_INNER case), I think we cannot skip building\n> the hash table even when the outer rel is completely empty.\n>\n\nHere is the new patch which addresses the obsoleted comments.\n\nThanks\nRichard", "msg_date": "Tue, 9 Aug 2022 16:12:08 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "Just for kicks, I ran query in your original post under EXPLAIN ANALYZE\nin both patched and unpatched with this last version. I got this (best\nof three):\n\nUnpatched:\n55432 16devel 437532=# explain (analyze, buffers) select * from foo left join bar on foo.a = bar.c where bar.c is null;\n QUERY PLAN \n────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n Hash Anti Join (cost=159039.00..183457.23 rows=10 width=20) (actual time=482.788..483.182 rows=10 loops=1)\n Hash Cond: (foo.a = bar.c)\n Buffers: shared hit=161 read=21964, temp read=8 written=8\n -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8) (actual time=0.020..0.022 rows=10 loops=1)\n Buffers: shared hit=1\n -> Hash (cost=72124.00..72124.00 rows=5000000 width=12) (actual time=482.128..482.129 rows=0 loops=1)\n Buckets: 262144 Batches: 64 Memory Usage: 2048kB\n Buffers: shared hit=160 read=21964\n -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=12) (actual time=0.092..237.431 rows=5000000 loops=1)\n Buffers: shared hit=160 read=21964\n Planning Time: 0.182 ms\n Execution Time: 483.248 ms\n\n\nPatched:\n QUERY PLAN \n──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n Hash Right Anti Join (cost=1.23..90875.24 rows=10 width=20) (actual time=457.654..457.658 rows=10 loops=1)\n Hash Cond: (bar.c = foo.a)\n Buffers: shared hit=33 
read=22092\n -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=12) (actual time=0.020..229.097 rows=5000000 loops=1)\n Buffers: shared hit=32 read=22092\n -> Hash (cost=1.10..1.10 rows=10 width=8) (actual time=0.011..0.012 rows=10 loops=1)\n Buckets: 1024 Batches: 1 Memory Usage: 9kB\n Buffers: shared hit=1\n -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8) (actual time=0.006..0.007 rows=10 loops=1)\n Buffers: shared hit=1\n Planning Time: 0.067 ms\n Execution Time: 457.679 ms\n\n\n\nI suppose this looks good as far as the plan goes, but the cost estimation\nmight be a little bit too optimistic: it is reporting that the new plan\ncosts 50% of the original, yet the execution time is only 5% lower.\n\nI wonder where does time go (in unpatched) when seqscanning finishes\nand before hashing starts.\n\n(I had to disable JIT for the first one, as it insisted on JITting tuple\ndeforming.)\n\n-- \nÁlvaro Herrera 48°01'N 7°57'E — https://www.EnterpriseDB.com/\nBob [Floyd] used to say that he was planning to get a Ph.D. by the \"green\nstamp method,\" namely by saving envelopes addressed to him as 'Dr. Floyd'.\nAfter collecting 500 such letters, he mused, a university somewhere in\nArizona would probably grant him a degree. (Don Knuth)\n\n\n", "msg_date": "Tue, 9 Aug 2022 12:54:27 +0200", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Tue, Aug 9, 2022 at 6:54 PM Alvaro Herrera <alvherre@alvh.no-ip.org>\nwrote:\n\n> I suppose this looks good as far as the plan goes, but the cost estimation\n> might be a little bit too optimistic: it is reporting that the new plan\n> costs 50% of the original, yet the execution time is only 5% lower.\n\n\nThanks for trying this patch. Yeah, the estimated cost doesn't match the\nexecution time here. 
I tried the query locally and here is what I got\n(best of three):\n\nUnpatched:\n# explain analyze select * from foo left join bar on foo.a = bar.c where\nbar.c is null;\n QUERY PLAN\n---------------------------------------------------------------------------------------------------------------------------\n Hash Anti Join (cost=154156.00..173691.19 rows=1 width=16) (actual\ntime=1548.622..1548.624 rows=0 loops=1)\n Hash Cond: (foo.a = bar.c)\n -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8) (actual\ntime=0.024..0.026 rows=10 loops=1)\n -> Hash (cost=72124.00..72124.00 rows=5000000 width=8) (actual\ntime=1443.157..1443.158 rows=5000000 loops=1)\n Buckets: 262144 Batches: 64 Memory Usage: 5107kB\n -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=8)\n(actual time=0.045..481.059 rows=5000000 loops=1)\n Planning Time: 0.262 ms\n Execution Time: 1549.138 ms\n(8 rows)\n\nPatched:\n# explain analyze select * from foo left join bar on foo.a = bar.c where\nbar.c is null;\n QUERY PLAN\n---------------------------------------------------------------------------------------------------------------------\n Hash Right Anti Join (cost=1.23..90875.33 rows=1 width=16) (actual\ntime=985.773..985.775 rows=0 loops=1)\n Hash Cond: (bar.c = foo.a)\n -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=8) (actual\ntime=0.095..438.333 rows=5000000 loops=1)\n -> Hash (cost=1.10..1.10 rows=10 width=8) (actual time=0.076..0.077\nrows=10 loops=1)\n Buckets: 1024 Batches: 1 Memory Usage: 9kB\n -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8) (actual\ntime=0.060..0.064 rows=10 loops=1)\n Planning Time: 0.290 ms\n Execution Time: 985.830 ms\n(8 rows)\n\nSeems the cost matches the execution time better in my local box.\n\nThe right-anti join plan has the same cost estimation with right join\nplan in this case. 
So would you please help to test what the right join\nplan looks like in your env for the query below?\n\n select * from foo left join bar on foo.a = bar.c;\n\nThanks\nRichard\n\nOn Tue, Aug 9, 2022 at 6:54 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\nI suppose this looks good as far as the plan goes, but the cost estimation\nmight be a little bit too optimistic: it is reporting that the new plan\ncosts 50% of the original, yet the execution time is only 5% lower.Thanks for trying this patch. Yeah, the estimated cost doesn't match theexecution time here. I tried the query locally and here is what I got(best of three):Unpatched:# explain analyze select * from foo left join bar on foo.a = bar.c where bar.c is null;                                                        QUERY PLAN--------------------------------------------------------------------------------------------------------------------------- Hash Anti Join  (cost=154156.00..173691.19 rows=1 width=16) (actual time=1548.622..1548.624 rows=0 loops=1)   Hash Cond: (foo.a = bar.c)   ->  Seq Scan on foo  (cost=0.00..1.10 rows=10 width=8) (actual time=0.024..0.026 rows=10 loops=1)   ->  Hash  (cost=72124.00..72124.00 rows=5000000 width=8) (actual time=1443.157..1443.158 rows=5000000 loops=1)         Buckets: 262144  Batches: 64  Memory Usage: 5107kB         ->  Seq Scan on bar  (cost=0.00..72124.00 rows=5000000 width=8) (actual time=0.045..481.059 rows=5000000 loops=1) Planning Time: 0.262 ms Execution Time: 1549.138 ms(8 rows)Patched:# explain analyze select * from foo left join bar on foo.a = bar.c where bar.c is null;                                                     QUERY PLAN--------------------------------------------------------------------------------------------------------------------- Hash Right Anti Join  (cost=1.23..90875.33 rows=1 width=16) (actual time=985.773..985.775 rows=0 loops=1)   Hash Cond: (bar.c = foo.a)   ->  Seq Scan on bar  (cost=0.00..72124.00 rows=5000000 width=8) (actual 
time=0.095..438.333 rows=5000000 loops=1)   ->  Hash  (cost=1.10..1.10 rows=10 width=8) (actual time=0.076..0.077 rows=10 loops=1)         Buckets: 1024  Batches: 1  Memory Usage: 9kB         ->  Seq Scan on foo  (cost=0.00..1.10 rows=10 width=8) (actual time=0.060..0.064 rows=10 loops=1) Planning Time: 0.290 ms Execution Time: 985.830 ms(8 rows)Seems the cost matches the execution time better in my local box.The right-anti join plan has the same cost estimation with right joinplan in this case. So would you please help to test what the right joinplan looks like in your env for the query below? select * from foo left join bar on foo.a = bar.c;ThanksRichard", "msg_date": "Wed, 10 Aug 2022 15:57:33 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On 2022-Aug-10, Richard Guo wrote:\n\n> The right-anti join plan has the same cost estimation with right join\n> plan in this case. 
So would you please help to test what the right join\n> plan looks like in your env for the query below?\n> \n> select * from foo left join bar on foo.a = bar.c;\n\nYou're right, it does.\n\n55432 16devel 475322=# explain (analyze, buffers) select * from foo left join bar on foo.a = bar.c;\n QUERY PLAN \n──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n Hash Right Join (cost=1.23..90875.24 rows=10 width=20) (actual time=456.410..456.415 rows=10 loops=1)\n Hash Cond: (bar.c = foo.a)\n Buffers: shared hit=15852 read=6273\n -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=12) (actual time=0.036..210.468 rows=5000000 loops=1)\n Buffers: shared hit=15852 read=6272\n -> Hash (cost=1.10..1.10 rows=10 width=8) (actual time=0.037..0.038 rows=10 loops=1)\n Buckets: 1024 Batches: 1 Memory Usage: 9kB\n Buffers: shared read=1\n -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8) (actual time=0.022..0.026 rows=10 loops=1)\n Buffers: shared read=1\n Planning:\n Buffers: shared hit=92 read=13\n Planning Time: 1.077 ms\n Execution Time: 456.458 ms\n(14 filas)\n\n\n55432 16devel 475322=# explain (analyze, buffers) select * from foo left join bar on foo.a = bar.c where bar.c is null;\n QUERY PLAN \n──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\n Hash Right Anti Join (cost=1.23..90875.24 rows=10 width=20) (actual time=451.747..451.751 rows=10 loops=1)\n Hash Cond: (bar.c = foo.a)\n Buffers: shared hit=15646 read=6479\n -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=12) (actual time=0.048..204.940 rows=5000000 loops=1)\n Buffers: shared hit=15645 read=6479\n -> Hash (cost=1.10..1.10 rows=10 width=8) (actual time=0.030..0.031 rows=10 loops=1)\n Buckets: 1024 Batches: 1 Memory Usage: 9kB\n Buffers: shared hit=1\n -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8) (actual time=0.017..0.020 rows=10 loops=1)\n Buffers: 
shared hit=1\n Planning Time: 0.227 ms\n Execution Time: 451.793 ms\n\n-- \nÁlvaro Herrera Breisgau, Deutschland — https://www.EnterpriseDB.com/\n\"Every machine is a smoke machine if you operate it wrong enough.\"\nhttps://twitter.com/libseybieda/status/1541673325781196801\n\n\n", "msg_date": "Wed, 10 Aug 2022 10:40:35 +0200", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Wed, Aug 10, 2022 at 4:40 PM Alvaro Herrera <alvherre@alvh.no-ip.org>\r\nwrote:\r\n\r\n> On 2022-Aug-10, Richard Guo wrote:\r\n>\r\n> > The right-anti join plan has the same cost estimation with right join\r\n> > plan in this case. So would you please help to test what the right join\r\n> > plan looks like in your env for the query below?\r\n> >\r\n> > select * from foo left join bar on foo.a = bar.c;\r\n>\r\n> You're right, it does.\r\n>\r\n> 55432 16devel 475322=# explain (analyze, buffers) select * from foo left\r\n> join bar on foo.a = bar.c;\r\n> QUERY PLAN\r\n>\r\n>\r\n> ──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\r\n> Hash Right Join (cost=1.23..90875.24 rows=10 width=20) (actual\r\n> time=456.410..456.415 rows=10 loops=1)\r\n> Hash Cond: (bar.c = foo.a)\r\n> Buffers: shared hit=15852 read=6273\r\n> -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=12)\r\n> (actual time=0.036..210.468 rows=5000000 loops=1)\r\n> Buffers: shared hit=15852 read=6272\r\n> -> Hash (cost=1.10..1.10 rows=10 width=8) (actual time=0.037..0.038\r\n> rows=10 loops=1)\r\n> Buckets: 1024 Batches: 1 Memory Usage: 9kB\r\n> Buffers: shared read=1\r\n> -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8) (actual\r\n> time=0.022..0.026 rows=10 loops=1)\r\n> Buffers: shared read=1\r\n> Planning:\r\n> Buffers: shared hit=92 read=13\r\n> Planning Time: 1.077 ms\r\n> Execution Time: 456.458 ms\r\n> (14 
filas)\r\n\r\n\r\nThanks for help testing. Comparing the anti join plan and the right join\r\nplan, the estimated cost and the execution time mismatch a lot. Seems\r\nthe cost estimate of hashjoin path is not that precise for this case\r\neven in the unpatched codes. Maybe this is something we need to improve.\r\n\r\nThanks\r\nRichard\r\n\nOn Wed, Aug 10, 2022 at 4:40 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:On 2022-Aug-10, Richard Guo wrote:\n\r\n> The right-anti join plan has the same cost estimation with right join\r\n> plan in this case. So would you please help to test what the right join\r\n> plan looks like in your env for the query below?\r\n> \r\n>  select * from foo left join bar on foo.a = bar.c;\n\r\nYou're right, it does.\n\r\n55432 16devel 475322=# explain (analyze, buffers)  select * from foo left join bar on foo.a = bar.c;\r\n                                                      QUERY PLAN                                                      \r\n──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────\r\n Hash Right Join  (cost=1.23..90875.24 rows=10 width=20) (actual time=456.410..456.415 rows=10 loops=1)\r\n   Hash Cond: (bar.c = foo.a)\r\n   Buffers: shared hit=15852 read=6273\r\n   ->  Seq Scan on bar  (cost=0.00..72124.00 rows=5000000 width=12) (actual time=0.036..210.468 rows=5000000 loops=1)\r\n         Buffers: shared hit=15852 read=6272\r\n   ->  Hash  (cost=1.10..1.10 rows=10 width=8) (actual time=0.037..0.038 rows=10 loops=1)\r\n         Buckets: 1024  Batches: 1  Memory Usage: 9kB\r\n         Buffers: shared read=1\r\n         ->  Seq Scan on foo  (cost=0.00..1.10 rows=10 width=8) (actual time=0.022..0.026 rows=10 loops=1)\r\n               Buffers: shared read=1\r\n Planning:\r\n   Buffers: shared hit=92 read=13\r\n Planning Time: 1.077 ms\r\n Execution Time: 456.458 ms\r\n(14 filas)Thanks for help testing. 
Comparing the anti join plan and the right joinplan, the estimated cost and the execution time mismatch a lot. Seemsthe cost estimate of hashjoin path is not that precise for this caseeven in the unpatched codes. Maybe this is something we need to improve.ThanksRichard", "msg_date": "Thu, 11 Aug 2022 10:58:32 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "So what is the status of this patch?\n\nIt looks like you received some feedback from Emre, Tom, Ronan, and\nAlvaro but it also looks like you responded to most or all of that.\nAre you still blocked waiting for feedback? Anything specific you need\nhelp with?\n\nOr is the patch ready for commit now? In which case it would be good\nto rebase it since it's currently failing to apply. Well it would be\ngood to rebase regardless but it would be especially important if we\nwant to get it committed :)\n\n\n", "msg_date": "Tue, 14 Mar 2023 14:25:05 -0400", "msg_from": "\"Gregory Stark (as CFM)\" <stark.cfm@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Wed, Mar 15, 2023 at 2:25 AM Gregory Stark (as CFM) <stark.cfm@gmail.com>\nwrote:\n\n> So what is the status of this patch?\n>\n> It looks like you received some feedback from Emre, Tom, Ronan, and\n> Alvaro but it also looks like you responded to most or all of that.\n> Are you still blocked waiting for feedback? Anything specific you need\n> help with?\n>\n> Or is the patch ready for commit now? In which case it would be good\n> to rebase it since it's currently failing to apply. Well it would be\n> good to rebase regardless but it would be especially important if we\n> want to get it committed :)\n\n\nThanks for reminding. Attached is the rebased patch, with no other\nchanges. 
I think the patch is ready for commit.\n\nThanks\nRichard", "msg_date": "Wed, 15 Mar 2023 16:36:07 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "Richard Guo <guofenglinux@gmail.com> writes:\n> Thanks for reminding. Attached is the rebased patch, with no other\n> changes. I think the patch is ready for commit.\n\nPushed after a little further fooling with the comments. I also had\nto rebase it over 11c2d6fdf (Parallel Hash Full Join). I think I did\nthat correctly, but it's not clear to me whether any of the existing\ntest cases are now doing parallelized hashed right antijoins. Might\nbe worth a little more testing.\n\nI think that Alvaro's concern about incorrect cost estimates may be\nmisplaced. I couldn't find any obvious errors in the costing logic for\nthis, given that we concluded that the early-exit runtime logic cannot\napply. Also, when I try simply executing Richard's original test query\n(in a non-JIT build), the runtimes I get line up quite well ... maybe\ntoo well? ... with the cost estimates:\n\n\t\t\tv15\t\tHEAD w/patch\tRatio\n\nCost estimate\t\t173691.19\t90875.33\t0.52\nActual (best of 3)\t514.200 ms\t268.978 ms\t0.52\n\nI think the smaller differentials you guys were seeing were all about\nEXPLAIN ANALYZE overhead.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 05 Apr 2023 17:11:11 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Thu, Apr 6, 2023 at 9:11 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Richard Guo <guofenglinux@gmail.com> writes:\n> > Thanks for reminding. Attached is the rebased patch, with no other\n> > changes. I think the patch is ready for commit.\n>\n> Pushed after a little further fooling with the comments. 
I also had\n> to rebase it over 11c2d6fdf (Parallel Hash Full Join). I think I did\n> that correctly, but it's not clear to me whether any of the existing\n> test cases are now doing parallelized hashed right antijoins. Might\n> be worth a little more testing.\n\nI don't see any (at least that are EXPLAINed). Wondering if we should\nadd some of these into join_hash.sql, but probably not before I figure\nout how to make that whole file run faster...\n\n> I think that Alvaro's concern about incorrect cost estimates may be\n> misplaced. I couldn't find any obvious errors in the costing logic for\n> this, given that we concluded that the early-exit runtime logic cannot\n> apply. Also, when I try simply executing Richard's original test query\n> (in a non-JIT build), the runtimes I get line up quite well ... maybe\n> too well? ... with the cost estimates:\n>\n> v15 HEAD w/patch Ratio\n>\n> Cost estimate 173691.19 90875.33 0.52\n> Actual (best of 3) 514.200 ms 268.978 ms 0.52\n>\n> I think the smaller differentials you guys were seeing were all about\n> EXPLAIN ANALYZE overhead.\n\nI tried the original example from the top of this thread and saw a\ndecent speedup from parallelism, but only if I set\nmin_parallel_table_scan_size=0, and otherwise it doesn't choose\nParallel Hash Right Anti Join. 
Same if I embiggen bar significantly.\nHaven't looked yet but I wonder if there is some left/right confusion\non parallel degree computation or something like that...\n\n\n", "msg_date": "Thu, 6 Apr 2023 12:17:37 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Thu, Apr 6, 2023 at 12:17 PM Thomas Munro <thomas.munro@gmail.com> wrote:\n> I tried the original example from the top of this thread and saw a\n> decent speedup from parallelism, but only if I set\n> min_parallel_table_scan_size=0, and otherwise it doesn't choose\n> Parallel Hash Right Anti Join. Same if I embiggen bar significantly.\n> Haven't looked yet but I wonder if there is some left/right confusion\n> on parallel degree computation or something like that...\n\nAhh, the problem is just that create_plain_partial_paths() doesn't\nbother to create a partial path for foo at all, because it's so small,\nso hash_inner_and_outer() can't even consider a parallel join (that\nneeds partial paths on both sides). What we want here is a shared\nhash table so we can have shared match flags, an entirely new concern,\nbut create_plain_partial_path() can't see any point in a partial scan\nof such a small table. It works if you're OK creating partial paths\nfor everything...\n\n+#if 0\n /* If any limit was set to zero, the user doesn't want a\nparallel scan. */\n if (parallel_workers <= 0)\n return;\n+#endif\n\n\n", "msg_date": "Thu, 6 Apr 2023 16:56:43 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "Thomas Munro <thomas.munro@gmail.com> writes:\n> ... It works if you're OK creating partial paths\n> for everything...\n\nHmm. 
The committed patch already causes us to investigate more\npaths than before, which I was okay with because it only costs\nmore if there's an antijoin involved --- which it seems like\nthere's at least a 50% chance of winning on, depending on which\ntable is bigger.\n\nThis:\n\n> +#if 0\n> /* If any limit was set to zero, the user doesn't want a\n> parallel scan. */\n> if (parallel_workers <= 0)\n> return;\n> +#endif\n\nseems like it adds a lot of new paths with a lot lower chance\nof win, but maybe we could tighten the conditions to improve\nthe odds?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 06 Apr 2023 01:05:56 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Thu, Apr 6, 2023 at 8:18 AM Thomas Munro <thomas.munro@gmail.com> wrote:\n\n> On Thu, Apr 6, 2023 at 9:11 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > Richard Guo <guofenglinux@gmail.com> writes:\n> > > Thanks for reminding. Attached is the rebased patch, with no other\n> > > changes. I think the patch is ready for commit.\n> >\n> > Pushed after a little further fooling with the comments. I also had\n> > to rebase it over 11c2d6fdf (Parallel Hash Full Join). I think I did\n> > that correctly, but it's not clear to me whether any of the existing\n> > test cases are now doing parallelized hashed right antijoins. Might\n> > be worth a little more testing.\n>\n> I don't see any (at least that are EXPLAINed). Wondering if we should\n> add some of these into join_hash.sql, but probably not before I figure\n> out how to make that whole file run faster...\n\n\nThanks Tom for the rebase and pushing. Agreed that we need to add more\ntesting to cover Parallel Hash Right Anti Join. 
I tried one in\njoin_hash.sql as below\n\nexplain (costs off)\nselect count(*) from simple r right join bigger_than_it_looks s using (id)\nwhere r.id is null;\n QUERY PLAN\n---------------------------------------------------------------------\n Aggregate\n -> Gather\n Workers Planned: 2\n -> Parallel Hash Right Anti Join\n Hash Cond: (r.id = s.id)\n -> Parallel Seq Scan on simple r\n -> Parallel Hash\n -> Parallel Seq Scan on bigger_than_it_looks s\n(8 rows)\n\nBut as Thomas said, maybe we need to wait until that file becomes\nfaster.\n\nThanks\nRichard\n\nOn Thu, Apr 6, 2023 at 8:18 AM Thomas Munro <thomas.munro@gmail.com> wrote:On Thu, Apr 6, 2023 at 9:11 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Richard Guo <guofenglinux@gmail.com> writes:\n> > Thanks for reminding.  Attached is the rebased patch, with no other\n> > changes.  I think the patch is ready for commit.\n>\n> Pushed after a little further fooling with the comments.  I also had\n> to rebase it over 11c2d6fdf (Parallel Hash Full Join).  I think I did\n> that correctly, but it's not clear to me whether any of the existing\n> test cases are now doing parallelized hashed right antijoins.  Might\n> be worth a little more testing.\n\nI don't see any (at least that are EXPLAINed).  Wondering if we should\nadd some of these into join_hash.sql, but probably not before I figure\nout how to make that whole file run faster...Thanks Tom for the rebase and pushing.  Agreed that we need to add moretesting to cover Parallel Hash Right Anti Join.  
I tried one injoin_hash.sql as belowexplain (costs off)select count(*) from simple r right join bigger_than_it_looks s using (id) where r.id is null;                             QUERY PLAN--------------------------------------------------------------------- Aggregate   ->  Gather         Workers Planned: 2         ->  Parallel Hash Right Anti Join               Hash Cond: (r.id = s.id)               ->  Parallel Seq Scan on simple r               ->  Parallel Hash                     ->  Parallel Seq Scan on bigger_than_it_looks s(8 rows)But as Thomas said, maybe we need to wait until that file becomesfaster.ThanksRichard", "msg_date": "Thu, 6 Apr 2023 14:37:03 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Thu, Apr 6, 2023 at 1:06 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n\n> This:\n>\n> > +#if 0\n> > /* If any limit was set to zero, the user doesn't want a\n> > parallel scan. */\n> > if (parallel_workers <= 0)\n> > return;\n> > +#endif\n>\n> seems like it adds a lot of new paths with a lot lower chance\n> of win, but maybe we could tighten the conditions to improve\n> the odds?\n\n\nSeems it wins if the parallel scan becomes part of a hash join in final\nplan. I wonder if we have a way to know that in this early stage.\n\nBTW, zero parallel_workers seems would break some later assumptions, so\nwe may need to give it a meaningful number if we want to do in this way.\n\nThanks\nRichard\n\nOn Thu, Apr 6, 2023 at 1:06 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\nThis:\n\n> +#if 0\n>         /* If any limit was set to zero, the user doesn't want a\n> parallel scan. 
*/\n>         if (parallel_workers <= 0)\n>                 return;\n> +#endif\n\nseems like it adds a lot of new paths with a lot lower chance\nof win, but maybe we could tighten the conditions to improve\nthe odds?Seems it wins if the parallel scan becomes part of a hash join in finalplan.  I wonder if we have a way to know that in this early stage.BTW, zero parallel_workers seems would break some later assumptions, sowe may need to give it a meaningful number if we want to do in this way.ThanksRichard", "msg_date": "Thu, 6 Apr 2023 14:40:15 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Thu, Apr 6, 2023 at 6:40 PM Richard Guo <guofenglinux@gmail.com> wrote:\n> Seems it wins if the parallel scan becomes part of a hash join in final\n> plan. I wonder if we have a way to know that in this early stage.\n\nI haven't tried but I'm not sure off the top of my head how to make a\ndecision that early unless it's super coarse grained, like is there an\nanti join in here somewhere...\n\nGenerally, this seems to be a consequence of our parallel query\nplanner design where parallel paths are generated separately and\ncompete on cost, as foretold by Hong and Stonebraker[1]. It's going\nto be hard to prune those early without missing out on some good\nplans, if you don't have a crystal ball.\n\nI wondered for a while what horrible technical problems would come up\nif we could defer creation of paths, so partial_pathlist is empty but\na new RelOptInfo flag says \"you can call\nfinish_the_partial_pathlist_please(root, rel)) if you really want\none\". We could try to be sparing about calling it that so we don't\nfinish up creating them all. That would at least move the\nshould-I-bother-to-make-this-path? logic close to the place with the\nknowledge that it'd be useful, in this case the inner side of a\nparallel hash join. 
One problem is that you have to get a\nparallel_workers number from somewhere to make a partial path. The\nhash join path code knows what its own parallel_workers number will be\n(it inherits it from the outer path, though I can't immediately think\nof any good reason why it shouldn't be Max(inner, outer)), but if we\nwere to feed that into a created-on-demand inner path that is then\ncached, we'd have a horrible ordering dependency (some other join in\nthe query gets planned first with a different parallel_workers number\nand it gets cached differently). Yuck. As you noted, 0 isn't a great\nnumber either, but it leads to a another thought...\n\nI wondered if we could somehow use the complete (non-partial) path\ndirectly in some cases here, if certain conditions are met. Aside\nfrom any other technical problems, you might ask \"but what about the\nestimates/costing we already did for that path\"; well the numbers are\nusually wrong anyway! We have complete paths, and partial paths for\narbitrary parallel_workers numbers that bubbled up from our scan\nsize-based heuristics. Every time we combine more than one partial\npath, we have to handle non-matching \"parallel degree\" (I'm using that\nword to mean the result of get_parallel_divisor(path), a lightly\ngrilled version of parallel_workers that makes dubious assumptions,\nbut I digress). Concretely, parallel append and parallel hash join\nalready rescale some of their input variables to match their own\nnominal parallel degree (ugh, I see a few things we should fix in that\ncode, but this email is long enough already). I wonder if we might\nneed some infrastructure to formalise that sort of thing. For\nexample, look at the row estimates in the EXPLAIN of parallel append\nover tables with parallel_workers explicitly set to different numbers\nusing ALTER TABLE. 
They are wrong (they're based on different\nparallel degrees; turn off parallel_leader_participation to make the\narithmetic easier to follow), while the append node itself has\nrescaled them and has a good row estimate for its own nominal parallel\ndegree, which in turn might be wrong depending on what is above.\nPerhaps EXPLAIN and everything else should use some common\ninfrastructure to deal with this.\n\nIn other words, we avoid the need for a try-every-parallel-degree\nexplosion by rescaling from some arbitrary input degree to some\narbitrary output degree, but we can't go all the way and do the\ntwo-phase Hong thing and rescale from non-partial paths in general\n(for various technical reasons that apply to more complex nodes, but\nnot to basic scans). From where we are, I'm not sure how much of a\nbig step it is to (1) formalise the path rescaling system and (2) be\nable to rescale some qualifying simple complete paths too, if needed\nfor places like this.\n\nOf course you could quibble with the concept of linear scaling of\nvarious number by parallel degrees; various things aren't linear or\neven continuous (probably why Hong's system included hash join\nthresholds). Even the concept of get_parallel_divisor(path) as\napplied to row estimates is suspect, because it assumes that the\nplanned workers will show up; if a smaller number shows up (at all,\ndue to max_parallel_workers, or just to this node because a higher\nlevel parallel append sent workers to different subplans) then a node\nmight receive many more input tuples than expected and blow through\nwork_mem, even if all estimates were 100% perfect. I have no idea\nwhat to do about that. At least *that* problem doesn't apply to\nparallel hash, which shares the memory for the number of planned\nworkers, even if fewer show up, but that ideas isn't without critics\neither.\n\nI dunno. Sorry for the wall of text/ideas. 
I see unfinished business\nin every direction.\n\n[1] https://www.postgresql.org/message-id/flat/CA%2BhUKGL-Fo9mZyFK1tdmzFng2puRBrgROsCiB1%3Dn7wP79mTZ%2Bg%40mail.gmail.com\n\n\n", "msg_date": "Fri, 7 Apr 2023 10:44:20 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Tue, Aug 2, 2022 at 3:13 PM Richard Guo <guofenglinux@gmail.com> wrote:\n\n> On Sun, Jul 31, 2022 at 12:07 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n>> [ wanders away wondering if JOIN_RIGHT_SEMI should become a thing ... ]\n>\n> Maybe this is something we can do. Currently for the query below:\n>\n> # explain select * from foo where a in (select c from bar);\n> QUERY PLAN\n> -------------------------------------------------------------------------\n> Hash Semi Join (cost=154156.00..173691.29 rows=10 width=8)\n> Hash Cond: (foo.a = bar.c)\n> -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8)\n> -> Hash (cost=72124.00..72124.00 rows=5000000 width=4)\n> -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=4)\n> (5 rows)\n>\n> I believe we can get a cheaper plan if we are able to swap the outer and\n> inner for SEMI JOIN and use the smaller 'foo' as inner rel.\n>\n\nI'm thinking about the JOIN_RIGHT_SEMI thing and it seems that it can be\nimplemented for HashJoin with very short change. What we want to do is\nto just have the first match for each inner tuple. 
So after scanning\nthe hash bucket for matches, we just need to check whether the inner\ntuple has been set match and skip it if so, something like\n\n {\n if (!ExecScanHashBucket(node, econtext))\n {\n /* out of matches; check for possible outer-join fill */\n node->hj_JoinState = HJ_FILL_OUTER_TUPLE;\n continue;\n }\n }\n\n+ /*\n+ * In a right-semijoin, we only need the first match for each\n+ * inner tuple.\n+ */\n+ if (node->js.jointype == JOIN_RIGHT_SEMI &&\n+ HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(node->hj_CurTuple)))\n+ continue;\n+\n\nI have a simple implementation locally and tried it with the query below\nand saw a speedup of 2055.617 ms VS. 1156.772 ms (both best of 3).\n\n# explain (costs off, analyze)\nselect * from foo where a in (select c from bar);\n QUERY PLAN\n-------------------------------------------------------------------------------\n Hash Semi Join (actual time=1957.748..2055.058 rows=10 loops=1)\n Hash Cond: (foo.a = bar.c)\n -> Seq Scan on foo (actual time=0.026..0.029 rows=10 loops=1)\n -> Hash (actual time=1938.818..1938.819 rows=5000000 loops=1)\n Buckets: 262144 Batches: 64 Memory Usage: 4802kB\n -> Seq Scan on bar (actual time=0.016..853.010 rows=5000000\nloops=1)\n Planning Time: 0.327 ms\n Execution Time: 2055.617 ms\n(8 rows)\n\n# explain (costs off, analyze)\nselect * from foo where a in (select c from bar);\n QUERY PLAN\n-------------------------------------------------------------------------\n Hash Right Semi Join (actual time=11.525..1156.713 rows=10 loops=1)\n Hash Cond: (bar.c = foo.a)\n -> Seq Scan on bar (actual time=0.034..523.036 rows=5000000 loops=1)\n -> Hash (actual time=0.027..0.029 rows=10 loops=1)\n Buckets: 1024 Batches: 1 Memory Usage: 9kB\n -> Seq Scan on foo (actual time=0.009..0.014 rows=10 loops=1)\n Planning Time: 0.312 ms\n Execution Time: 1156.772 ms\n(8 rows)\n\nIt may not be easy for MergeJoin and NestLoop though, as we do not have\na way to know if an inner tuple has been already matched or not. 
But\nthe benefit of swapping inputs for MergeJoin and NestLoop seems to be\nsmall, so I think it's OK to ignore them.\n\nSo is it worthwhile to make JOIN_RIGHT_SEMI come true?\n\nThanks\nRichard\n\nOn Tue, Aug 2, 2022 at 3:13 PM Richard Guo <guofenglinux@gmail.com> wrote:On Sun, Jul 31, 2022 at 12:07 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n[ wanders away wondering if JOIN_RIGHT_SEMI should become a thing ... ]Maybe this is something we can do. Currently for the query below:# explain select * from foo where a in (select c from bar);                               QUERY PLAN------------------------------------------------------------------------- Hash Semi Join  (cost=154156.00..173691.29 rows=10 width=8)   Hash Cond: (foo.a = bar.c)   ->  Seq Scan on foo  (cost=0.00..1.10 rows=10 width=8)   ->  Hash  (cost=72124.00..72124.00 rows=5000000 width=4)         ->  Seq Scan on bar  (cost=0.00..72124.00 rows=5000000 width=4)(5 rows)I believe we can get a cheaper plan if we are able to swap the outer andinner for SEMI JOIN and use the smaller 'foo' as inner rel.I'm thinking about the JOIN_RIGHT_SEMI thing and it seems that it can beimplemented for HashJoin with very short change.  What we want to do isto just have the first match for each inner tuple.  So after scanningthe hash bucket for matches, we just need to check whether the innertuple has been set match and skip it if so, something like      {          if (!ExecScanHashBucket(node, econtext))          {              /* out of matches; check for possible outer-join fill */              node->hj_JoinState = HJ_FILL_OUTER_TUPLE;              continue;          }      }+     /*+      * In a right-semijoin, we only need the first match for each+      * inner tuple.+      */+     if (node->js.jointype == JOIN_RIGHT_SEMI &&+         HeapTupleHeaderHasMatch(HJTUPLE_MINTUPLE(node->hj_CurTuple)))+         continue;+I have a simple implementation locally and tried it with the query belowand saw a speedup of 2055.617 ms VS. 
1156.772 ms (both best of 3).# explain (costs off, analyze)select * from foo where a in (select c from bar);                                  QUERY PLAN------------------------------------------------------------------------------- Hash Semi Join (actual time=1957.748..2055.058 rows=10 loops=1)   Hash Cond: (foo.a = bar.c)   ->  Seq Scan on foo (actual time=0.026..0.029 rows=10 loops=1)   ->  Hash (actual time=1938.818..1938.819 rows=5000000 loops=1)         Buckets: 262144  Batches: 64  Memory Usage: 4802kB         ->  Seq Scan on bar (actual time=0.016..853.010 rows=5000000 loops=1) Planning Time: 0.327 ms Execution Time: 2055.617 ms(8 rows)# explain (costs off, analyze)select * from foo where a in (select c from bar);                               QUERY PLAN------------------------------------------------------------------------- Hash Right Semi Join (actual time=11.525..1156.713 rows=10 loops=1)   Hash Cond: (bar.c = foo.a)   ->  Seq Scan on bar (actual time=0.034..523.036 rows=5000000 loops=1)   ->  Hash (actual time=0.027..0.029 rows=10 loops=1)         Buckets: 1024  Batches: 1  Memory Usage: 9kB         ->  Seq Scan on foo (actual time=0.009..0.014 rows=10 loops=1) Planning Time: 0.312 ms Execution Time: 1156.772 ms(8 rows)It may not be easy for MergeJoin and NestLoop though, as we do not havea way to know if an inner tuple has been already matched or not.  
Butthe benefit of swapping inputs for MergeJoin and NestLoop seems to besmall, so I think it's OK to ignore them.So is it worthwhile to make JOIN_RIGHT_SEMI come true?ThanksRichard", "msg_date": "Fri, 7 Apr 2023 15:28:46 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" }, { "msg_contents": "On Fri, Apr 7, 2023 at 3:28 PM Richard Guo <guofenglinux@gmail.com> wrote:\n\n> On Tue, Aug 2, 2022 at 3:13 PM Richard Guo <guofenglinux@gmail.com> wrote:\n>\n>> On Sun, Jul 31, 2022 at 12:07 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>>\n>>> [ wanders away wondering if JOIN_RIGHT_SEMI should become a thing ... ]\n>>\n>> Maybe this is something we can do. Currently for the query below:\n>>\n>> # explain select * from foo where a in (select c from bar);\n>> QUERY PLAN\n>> -------------------------------------------------------------------------\n>> Hash Semi Join (cost=154156.00..173691.29 rows=10 width=8)\n>> Hash Cond: (foo.a = bar.c)\n>> -> Seq Scan on foo (cost=0.00..1.10 rows=10 width=8)\n>> -> Hash (cost=72124.00..72124.00 rows=5000000 width=4)\n>> -> Seq Scan on bar (cost=0.00..72124.00 rows=5000000 width=4)\n>> (5 rows)\n>>\n>> I believe we can get a cheaper plan if we are able to swap the outer and\n>> inner for SEMI JOIN and use the smaller 'foo' as inner rel.\n>>\n> It may not be easy for MergeJoin and NestLoop though, as we do not have\n> a way to know if an inner tuple has been already matched or not. But\n> the benefit of swapping inputs for MergeJoin and NestLoop seems to be\n> small, so I think it's OK to ignore them.\n>\n\nHmm. Actually we can do it for MergeJoin by avoiding restoring inner\nscan to the marked tuple in EXEC_MJ_TESTOUTER, in the case when new\nouter tuple == marked tuple. But I'm not sure how much benefit we can\nget from Merge Right Semi Join.\n\nFor HashJoin, though, there are cases that can surely benefit from Hash\nRight Semi Join. 
So I go ahead and have a try on it as attached.\n\nThanks\nRichard", "msg_date": "Mon, 10 Apr 2023 17:21:08 +0800", "msg_from": "Richard Guo <guofenglinux@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Using each rel as both outer and inner for JOIN_ANTI" } ]
[ { "msg_contents": "Hey,\n\nI was translating logicaldecoding.sgml, and I saw this sentence:\n\nThere are multiple required streaming callbacks\n(<function>stream_start_cb</function>, <function>stream_stop_cb</function>,\n<function>stream_abort_cb</function>, <function>stream_commit_cb</function>\nand <function>stream_change_cb</function>) and two optional callbacks\n(<function>stream_message_cb</function>) and\n(<function>stream_truncate_cb</function>).\n\nThe two last sets of parentheses seem really weird to me. Looks like it\nshould be:\n(<function>stream_message_cb</function> and\n<function>stream_truncate_cb</function>).\n\nReally tiny patch attached to fix this if it really is wrong, and anyone\ncares enough to fix it :)\n\nRegards.\n\n\n-- \nGuillaume.", "msg_date": "Thu, 24 Jun 2021 13:57:09 +0200", "msg_from": "Guillaume Lelarge <guillaume@lelarge.info>", "msg_from_op": true, "msg_subject": "Weird use of parentheses in the manual" }, { "msg_contents": "On Thu, Jun 24, 2021 at 5:27 PM Guillaume Lelarge\n<guillaume@lelarge.info> wrote:\n>\n> Really tiny patch attached to fix this if it really is wrong, and anyone cares enough to fix it :)\n>\n\nLGTM. I'll take care of this tomorrow unless someone else has any\nsuggestions in this regard.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 24 Jun 2021 18:38:22 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Weird use of parentheses in the manual" }, { "msg_contents": "On Thu, Jun 24, 2021 at 6:38 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Thu, Jun 24, 2021 at 5:27 PM Guillaume Lelarge\n> <guillaume@lelarge.info> wrote:\n> >\n> > Really tiny patch attached to fix this if it really is wrong, and anyone cares enough to fix it :)\n> >\n>\n> LGTM. 
I'll take care of this tomorrow unless someone else has any\n> suggestions in this regard.\n>\n\nPushed.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 25 Jun 2021 12:25:15 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Weird use of parentheses in the manual" }, { "msg_contents": "Le ven. 25 juin 2021 à 08:55, Amit Kapila <amit.kapila16@gmail.com> a\nécrit :\n\n> On Thu, Jun 24, 2021 at 6:38 PM Amit Kapila <amit.kapila16@gmail.com>\n> wrote:\n> >\n> > On Thu, Jun 24, 2021 at 5:27 PM Guillaume Lelarge\n> > <guillaume@lelarge.info> wrote:\n> > >\n> > > Really tiny patch attached to fix this if it really is wrong, and\n> anyone cares enough to fix it :)\n> > >\n> >\n> > LGTM. I'll take care of this tomorrow unless someone else has any\n> > suggestions in this regard.\n> >\n>\n> Pushed.\n>\n>\nThanks.\n\n\n-- \nGuillaume.\n\nLe ven. 25 juin 2021 à 08:55, Amit Kapila <amit.kapila16@gmail.com> a écrit :On Thu, Jun 24, 2021 at 6:38 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Thu, Jun 24, 2021 at 5:27 PM Guillaume Lelarge\n> <guillaume@lelarge.info> wrote:\n> >\n> > Really tiny patch attached to fix this if it really is wrong, and anyone cares enough to fix it :)\n> >\n>\n> LGTM. I'll take care of this tomorrow unless someone else has any\n> suggestions in this regard.\n>\n\nPushed.\nThanks.-- Guillaume.", "msg_date": "Fri, 25 Jun 2021 09:02:48 +0200", "msg_from": "Guillaume Lelarge <guillaume@lelarge.info>", "msg_from_op": true, "msg_subject": "Re: Weird use of parentheses in the manual" } ]
[ { "msg_contents": "COPY FREEZE throws ERRORs if you use it inappropriately. This makes it\nvery hard to use in practice.\n\nERRORs should be replaced by NOTICEs, or just silence. That treats\nFREEZE as an optional performance tweak, which is more appropriate for\nits use case.\n\nIn particular, the need to throw ERRORs leads us to disallow the\nFREEZE option for partitioned tables. It would be better to test each\npartition to see if the optimization can be applied at the point we\nopen each partition. If we can, good. If not, no worries.\n\nIf we agree, I can code a patch to do this.\n\nRelaxing this will make it easier to add support for FREEZE into pg_restore.\n\nThanks\n\n-- \nSimon Riggs http://www.EnterpriseDB.com/\n\n\n", "msg_date": "Thu, 24 Jun 2021 15:03:39 +0100", "msg_from": "Simon Riggs <simon.riggs@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Relaxing COPY FREEZE restrictions" } ]
[ { "msg_contents": "pgbench -i should use COPY FREEZE, patch attached.\n\n-- \nSimon Riggs http://www.EnterpriseDB.com/", "msg_date": "Thu, 24 Jun 2021 15:03:43 +0100", "msg_from": "Simon Riggs <simon.riggs@enterprisedb.com>", "msg_from_op": true, "msg_subject": "pgbench using COPY FREEZE" }, { "msg_contents": "\nHello Simon,\n\nIndeed.\n\nThere is already a \"ready\" patch in the queue, see:\n\n \thttps://commitfest.postgresql.org/33/3034/\n\n-- \nFabien.\n\n\n", "msg_date": "Thu, 24 Jun 2021 20:15:02 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: pgbench using COPY FREEZE" }, { "msg_contents": "On Thu, Jun 24, 2021 at 7:15 PM Fabien COELHO <coelho@cri.ensmp.fr> wrote:\n>\n>\n> Hello Simon,\n>\n> Indeed.\n>\n> There is already a \"ready\" patch in the queue, see:\n>\n> https://commitfest.postgresql.org/33/3034/\n\nAh, my bad. I withdraw this patch, apologies Tatsuo-san.\n\n-- \nSimon Riggs http://www.EnterpriseDB.com/\n\n\n", "msg_date": "Thu, 1 Jul 2021 17:02:50 +0100", "msg_from": "Simon Riggs <simon.riggs@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Re: pgbench using COPY FREEZE" }, { "msg_contents": "Hi Simon,\n\n>> Hello Simon,\n>>\n>> Indeed.\n>>\n>> There is already a \"ready\" patch in the queue, see:\n>>\n>> https://commitfest.postgresql.org/33/3034/\n> \n> Ah, my bad. I withdraw this patch, apologies Tatsuo-san.\n\nNo problem at all. Thank you for looking into the issue.\n--\nTatsuo Ishii\nSRA OSS, Inc. Japan\nEnglish: http://www.sraoss.co.jp/index_en.php\nJapanese:http://www.sraoss.co.jp\n\n\n", "msg_date": "Sun, 04 Jul 2021 10:53:48 +0900 (JST)", "msg_from": "Tatsuo Ishii <ishii@sraoss.co.jp>", "msg_from_op": false, "msg_subject": "Re: pgbench using COPY FREEZE" } ]
[ { "msg_contents": "Fix pattern matching logic for logs in TAP tests of pgbench\n\nThe logic checking for the format of per-thread logs used grep() with\ndirectly \"$re\", which would cause the test to consider all the logs as\na match without caring about their format at all. Using \"/$re/\" makes\ngrep() perform a regex test, which is what we want here.\n\nWhile on it, improve some of the tests to be more picky with the\npatterns expected and add more comments to describe the tests.\n\nIssue discovered while digging into a separate patch.\n\nAuthor: Fabien Coelho, Michael Paquier\nDiscussion: https://postgr.es/m/YNPsPAUoVDCpPOGk@paquier.xyz\nBackpatch-through: 11\n\nBranch\n------\nmaster\n\nDetails\n-------\nhttps://git.postgresql.org/pg/commitdiff/c13585fe9e55813cf9feac67fe7b65d3a78fff92\n\nModified Files\n--------------\nsrc/bin/pgbench/t/001_pgbench_with_server.pl | 23 ++++++++++++++---------\n1 file changed, 14 insertions(+), 9 deletions(-)", "msg_date": "Thu, 24 Jun 2021 21:54:51 +0000", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "pgsql: Fix pattern matching logic for logs in TAP tests of pgbench" }, { "msg_contents": "On Thu, Jun 24, 2021 at 09:54:51PM +0000, Michael Paquier wrote:\n> Fix pattern matching logic for logs in TAP tests of pgbench\n> \n> The logic checking for the format of per-thread logs used grep() with\n> directly \"$re\", which would cause the test to consider all the logs as\n> a match without caring about their format at all. 
Using \"/$re/\" makes\n> grep() perform a regex test, which is what we want here.\n> \n> While on it, improve some of the tests to be more picky with the\n> patterns expected and add more comments to describe the tests.\n> \n> Issue discovered while digging into a separate patch.\n\nfairywren does not like that:\nhttps://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=fairywren&dt=2021-06-24%2022%3A36%3A14\n\nnot ok 302 - transaction format for 001_pgbench_log_2\nnot ok 311 - transaction format for 001_pgbench_log_3\n\nI am not sure if this is a pre-existing bug in pgbench itself\nregarding the way we generate the logs or an issue with fairywren's\nperl installation.\n\nAndrew, what's the format of the per-thread logs generated on this\nhost when running the commands? I'd bet that the checks for the\nclient IDs are right, but that some parts of the regex are too picky\nwhen it comes to this host.\n--\nMichael", "msg_date": "Fri, 25 Jun 2021 08:51:19 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "(Forgot to add Andrew in CC, now done)\n\nOn Fri, Jun 25, 2021 at 08:51:19AM +0900, Michael Paquier wrote:\n> Andrew, what's the format of the per-thread logs generated on this\n> host when running the commands? I'd bet that the checks for the\n> client IDs are right, but that some parts of the regex are too picky\n> when it comes to this host.\n\nI have tested that on my own Windows host using MSVC and Active Perl,\nand those tests pass. I am not sure what's happening here. 
A simple\nway to figure out what's going on would be to make the tests more\ntalkative and show up the logs that don't match.\n--\nMichael", "msg_date": "Fri, 25 Jun 2021 09:26:21 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "\nOn 6/24/21 8:26 PM, Michael Paquier wrote:\n> (Forgot to add Andrew in CC, now done)\n>\n> On Fri, Jun 25, 2021 at 08:51:19AM +0900, Michael Paquier wrote:\n>> Andrew, what's the format of the per-thread logs generated on this\n>> host when running the commands? I'd bet that the checks for the\n>> client IDs are right, but that some parts of the regex are too picky\n>> when it comes to this host.\n> I have tested that on my own Windows host using MSVC and Active Perl,\n> and those tests pass. I am not sure what's happening here. A simple\n> way to figure out what's going on would be to make the tests more\n> talkative and show up the logs that don't match.\n\n\n\nThat's not really an equivalent test. I'm taking a look\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Thu, 24 Jun 2021 21:36:53 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "On Thu, Jun 24, 2021 at 09:36:53PM -0400, Andrew Dunstan wrote:\n> That's not really an equivalent test. I'm taking a look\n\nThanks!\n--\nMichael", "msg_date": "Fri, 25 Jun 2021 10:53:59 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "\nOn 6/24/21 9:53 PM, Michael Paquier wrote:\n> On Thu, Jun 24, 2021 at 09:36:53PM -0400, Andrew Dunstan wrote:\n>> That's not really an equivalent test. 
I'm taking a look\n> Thanks!\n\n\n\n\nThere's a whole lot wrong with this code. To start with, why is that\nunchecked eval there. And why is it reading in log files on its own\ninstead of using TestLib::slurp_file, which, among other things,\nnormalizes line endings? There's a very good chance that this latter is\nthe issue. It only affects msys which is why you didn't see an issue on\nMSVC. And also, why does it carefully unlink the log files so that any\ntrace of what's gone wrong is deleted?\n\n\nBased on the little I've seen this file needs a serious code review.\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Thu, 24 Jun 2021 22:12:44 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "\nOn 6/24/21 10:12 PM, Andrew Dunstan wrote:\n> On 6/24/21 9:53 PM, Michael Paquier wrote:\n>> On Thu, Jun 24, 2021 at 09:36:53PM -0400, Andrew Dunstan wrote:\n>>> That's not really an equivalent test. I'm taking a look\n>> Thanks!\n>\n>\n>\n> There's a whole lot wrong with this code. To start with, why is that\n> unchecked eval there. And why is it reading in log files on its own\n> instead of using TestLib::slurp_file, which, among other things,\n> normalizes line endings? There's a very good chance that this latter is\n> the issue. It only affects msys which is why you didn't see an issue on\n> MSVC. And also, why does it carefully unlink the log files so that any\n> trace of what's gone wrong is deleted?\n>\n>\n> Based on the little I've seen this file needs a serious code review.\n\n\n\n... 
and there's the error:\n\n\ncheck_pgbench_logs($bdir, '001_pgbench_log_2', 1, 8, 92,\n��� qr{^[01] \\d{1,2} \\d+ \\d \\d+ \\d+$});\n\n\nand one further down the same.\n\n\nSince the file isn't read in using slurp_file, that $ won't match\nbecause the lines will end \\r\\n instead of \\n.\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Thu, 24 Jun 2021 22:26:18 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "On Thu, Jun 24, 2021 at 10:26:18PM -0400, Andrew Dunstan wrote:\n> Since the file isn't read in using slurp_file, that $ won't match\n> because the lines will end \\r\\n instead of \\n.\n\nI did not remember this one with Msys, thanks. I am not sure that\nthere is any need for an eval block here actually once you remove\nopen()? What do you think about something like the attached?\n--\nMichael", "msg_date": "Fri, 25 Jun 2021 12:08:29 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "\nOn 6/24/21 11:08 PM, Michael Paquier wrote:\n> On Thu, Jun 24, 2021 at 10:26:18PM -0400, Andrew Dunstan wrote:\n>> Since the file isn't read in using slurp_file, that $ won't match\n>> because the lines will end \\r\\n instead of \\n.\n> I did not remember this one with Msys, thanks. I am not sure that\n> there is any need for an eval block here actually once you remove\n> open()? What do you think about something like the attached?\n\n\n+��� ��� # On Msys, filter out any CRLF.\n+��� ��� $contents_raw =~ s/\\r\\n/\\n/g if $Config{osname} eq 'msys';\n\nThis is completely redundant. 
The whole point is that slurp_file does\nexactly this for you.\n\n\n+��� ��� my @contents = split(\"\\n\", $contents_raw);\n\nProbably more idiomatic to write split(/\\n/,$contents_raw), or\nsplit(/^/, $contents_raw) if you want to keep the line feeds.\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Fri, 25 Jun 2021 06:12:09 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "On Fri, Jun 25, 2021 at 06:12:09AM -0400, Andrew Dunstan wrote:\n> +        # On Msys, filter out any CRLF.\n> +        $contents_raw =~ s/\\r\\n/\\n/g if $Config{osname} eq 'msys';\n> \n> This is completely redundant. The whole point is that slurp_file does\n> exactly this for you.\n\nThanks. I have managed to duplicate that.\n\n> +        my @contents = split(\"\\n\", $contents_raw);\n> \n> Probably more idiomatic to write split(/\\n/,$contents_raw), or\n> split(/^/, $contents_raw) if you want to keep the line feeds.\n\nI have gone with the solution that removes the newlines. This does\nnot change the pattern checks, and that makes printing entries not\nmatching a bit cleaner.\n\nThanks a lot for the investigation!\n--\nMichael", "msg_date": "Fri, 25 Jun 2021 21:24:23 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "Hello Andrew & Micha�l,\n\nMy 0.02�:\n\n> There's a whole lot wrong with this code. To start with, why is that\n> unchecked eval there.\n\nYep. 
The idea was that other tests would go on being collected eg if the \nfile is not found, but it should have been checked anyway.\n\n> And why is it reading in log files on its own instead of using \n> TestLib::slurp_file, which, among other things, normalizes line endings?\n\nIndeed.\n\nHowever, if slurp_file fails it raises an exception and aborts the whole \nTAP unexpectedly, which is pretty unclean. So I'd suggest to keep the \neval, as attached. I tested it by changing the file name so that the slurp \nfails.\n\n> There's a very good chance that this latter is the issue. It only \n> affects msys which is why you didn't see an issue on MSVC. And also, why \n> does it carefully unlink the log files so that any trace of what's gone \n> wrong is deleted?\n\n> Based on the little I've seen this file needs a serious code review.\n\nProbably: My very old perl expertise is fading away because I'm not using \nit much these days. Cannot say I miss it:-)\n\n-- \nFabien.", "msg_date": "Sat, 26 Jun 2021 08:47:34 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "\nOn 6/26/21 2:47 AM, Fabien COELHO wrote:\n>\n> Hello Andrew & Michaᅵl,\n>\n> My 0.02ᅵ:\n>\n>> There's a whole lot wrong with this code. To start with, why is that\n>> unchecked eval there.\n>\n> Yep. The idea was that other tests would go on being collected eg if\n> the file is not found, but it should have been checked anyway.\n>\n>> And why is it reading in log files on its own instead of using\n>> TestLib::slurp_file, which, among other things, normalizes line endings?\n>\n> Indeed.\n>\n> However, if slurp_file fails it raises an exception and aborts the\n> whole TAP unexpectedly, which is pretty unclean. So I'd suggest to\n> keep the eval, as attached. I tested it by changing the file name so\n> that the slurp fails.\n\n\nSeem quite unnecessary. 
We haven't found that to be an issue elsewhere\nin the code where slurp_file is used. And in the present case we know\nthe file exists because we got its name from list_files().\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Sat, 26 Jun 2021 11:01:07 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "\n>> However, if slurp_file fails it raises an exception and aborts the\n>> whole TAP unexpectedly, which is pretty unclean. So I'd suggest to\n>> keep the eval, as attached. I tested it by changing the file name so\n>> that the slurp fails.\n>\n> Seem quite unnecessary. We haven't found that to be an issue elsewhere\n> in the code where slurp_file is used. And in the present case we know\n> the file exists because we got its name from list_files().\n\nFine with me!\n\n-- \nFabien.\n\n\n", "msg_date": "Sat, 26 Jun 2021 18:08:19 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "On Sat, Jun 26, 2021 at 11:01:07AM -0400, Andrew Dunstan wrote:\n> On 6/26/21 2:47 AM, Fabien COELHO wrote:\n>> However, if slurp_file fails it raises an exception and aborts the\n>> whole TAP unexpectedly, which is pretty unclean. So I'd suggest to\n>> keep the eval, as attached. I tested it by changing the file name so\n>> that the slurp fails.\n> \n> Seem quite unnecessary. We haven't found that to be an issue elsewhere\n> in the code where slurp_file is used. And in the present case we know\n> the file exists because we got its name from list_files().\n\nAgreed. That's an exchange between a hard failure mid-test and a\nfailure while letting the whole test run. 
Here, we expect the test to\nfind the log file all the time, so a hard failure does not sound like\na bad thing to me either.\n--\nMichael", "msg_date": "Sun, 27 Jun 2021 10:22:24 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" }, { "msg_contents": "\n>> Seem quite unnecessary. We haven't found that to be an issue elsewhere\n>> in the code where slurp_file is used. And in the present case we know\n>> the file exists because we got its name from list_files().\n>\n> Agreed. That's an exchange between a hard failure mid-test and a\n> failure while letting the whole test run. Here, we expect the test to\n> find the log file all the time, so a hard failure does not sound like\n> a bad thing to me either.\n\nOk, fine with me!\n\n-- \nFabien.\n\n\n", "msg_date": "Sun, 27 Jun 2021 07:48:25 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: pgsql: Fix pattern matching logic for logs in TAP tests of\n pgbench" } ]
[ { "msg_contents": "Hi.\n\n(For brevity, in this mail I refer to \"ALTER SUBSCRIPTION sub REFRESH\nPUBLICATION\" as \"ASRP\")\n\n--\n\nThe PG Docs for ASRP WITH (copy_data = true), says \"(Previously\nsubscribed tables are not copied.)\" [1].\n\nI thought this rule meant that only tables which got added by ALTER\nPUBLICATION pubname ADD TABLE ... [2] after the CREATE SUBSCRIPTION\nwould be affected by the copy_data.\n\nBut I recently learned that when there are partitions in the\npublication, then toggling the value of the PUBLICATION option\n\"publish_via_partition_root\" [3] can also *implicitly* change the list\npublished tables, and therefore that too might cause any ASRP to make\nuse of the copy_data value for those implicitly added\npartitions/tables.\n\nIt seems a bit too subtle.\n\nI was wondering if this should be made more obvious by a note added to\nthe PG Docs for the ASRP [1]. e.g. \"Previously subscribed tables are\nnot copied. Note: Tables may also be newly subscribed by changes to\nthe publish_via_partition_root option [link]\"\n\nOr perhaps, the \"publish_via_partition_root option\" Docs [3] should\nsay something. e.g. 
\"Note: Changing this option can affect the ASRP\ncopy_data [link].\n\nThoughts?\n\n-----\n[1] https://www.postgresql.org/docs/devel/sql-altersubscription.html\n[2] https://www.postgresql.org/docs/devel/sql-alterpublication.html\n[3] https://www.postgresql.org/docs/devel/sql-createpublication.html\n\nKind Regards,\nPeter Smith.\nFujitsu Australia\n\n\n", "msg_date": "Fri, 25 Jun 2021 13:49:48 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": true, "msg_subject": "PG Docs for ALTER SUBSCRIPTION REFRESH PUBLICATION - copy_data option" }, { "msg_contents": "(One month has passed since my original post but there have been no\nreplies to it).\n\nIt seems like the original post maybe just got buried with too many\nother mails so I am \"bumping\" this thread to elicit some response\nfor/against the suggestion.\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia.\n\n\n", "msg_date": "Wed, 25 Aug 2021 14:50:12 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": true, "msg_subject": "Re: PG Docs for ALTER SUBSCRIPTION REFRESH PUBLICATION - copy_data\n option" }, { "msg_contents": "On Fri, Jun 25, 2021 at 9:20 AM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n> But I recently learned that when there are partitions in the\n> publication, then toggling the value of the PUBLICATION option\n> \"publish_via_partition_root\" [3] can also *implicitly* change the list\n> published tables, and therefore that too might cause any ASRP to make\n> use of the copy_data value for those implicitly added\n> partitions/tables.\n>\n\nI have tried the below example in this context but didn't see any\neffect on changing via_root option.\n\nSet up on both publisher and subscriber:\n=================================\nCREATE TABLE tab2 (a int PRIMARY KEY, b text) PARTITION BY LIST (a);\nCREATE TABLE tab2_1 (b text, a int NOT NULL);\nALTER TABLE tab2 ATTACH PARTITION tab2_1 FOR VALUES IN (0, 1, 2, 3);\nCREATE TABLE tab2_2 PARTITION OF tab2 FOR 
VALUES IN (5, 6);\n\nPublisher:\n==========\nCREATE PUBLICATION pub_viaroot FOR TABLE tab2_2;\npostgres=# INSERT INTO tab2 VALUES (1), (0), (3), (5);\nINSERT 0 4\npostgres=# select * from tab2_1;\n b | a\n---+---\n | 1\n | 0\n | 3\n(3 rows)\npostgres=# select * from tab2_2;\n a | b\n---+---\n 5 |\n(1 row)\n\n\nSubscriber:\n==========\nCREATE SUBSCRIPTION sub_viaroot CONNECTION 'host=localhost port=5432\ndbname=postgres' PUBLICATION pub_viaroot;\npostgres=# select * from tab2_2;\n a | b\n---+---\n 5 |\n(1 row)\npostgres=# select * from tab2_1;\n b | a\n---+---\n(0 rows)\n\nSo, by this step, we can see the partition which is not subscribed is\nnot copied. Now, let's toggle via_root option.\nPublisher\n=========\nAlter Publication pub_viaroot Set (publish_via_partition_root = true);\n\nSubscriber\n==========\npostgres=# Alter Subscription sub_viaroot Refresh Publication;\nALTER SUBSCRIPTION\npostgres=# select * from tab2_2;\n a | b\n---+---\n 5 |\n(1 row)\npostgres=# select * from tab2_1;\n b | a\n---+---\n(0 rows)\n\nAs per your explanation, one can expect the data in tab2_1 in the last\nstep. 
Can you explain with example?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 14 Sep 2021 16:03:15 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: PG Docs for ALTER SUBSCRIPTION REFRESH PUBLICATION - copy_data\n option" }, { "msg_contents": "On Tue, Sep 14, 2021 at 8:33 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Fri, Jun 25, 2021 at 9:20 AM Peter Smith <smithpb2250@gmail.com> wrote:\n> >\n> > But I recently learned that when there are partitions in the\n> > publication, then toggling the value of the PUBLICATION option\n> > \"publish_via_partition_root\" [3] can also *implicitly* change the list\n> > published tables, and therefore that too might cause any ASRP to make\n> > use of the copy_data value for those implicitly added\n> > partitions/tables.\n> >\n>\n> I have tried the below example in this context but didn't see any\n> effect on changing via_root option.\n\nThanks for trying to reproduce. I also thought your steps were the\nsame as what I'd previously done but it seems like it was a bit\ndifferent. Below are my steps to observe some unexpected COPY\nhappening. 
Actually, now I am no longer sure if this is just a\ndocumentation issue; perhaps it is a bug.\n\nSTEP 1 - create partition tables on both sides\n===================================\n\n[PUB and SUB]\n\npostgres=# create table troot (a int) partition by range(a);\nCREATE TABLE\npostgres=# create table tless10 partition of troot for values from (1) to (9);\nCREATE TABLE\npostgres=# create table tmore10 partition of troot for values from (10) to (99);\nCREATE TABLE\n\nSTEP 2 - insert some data on pub-side\n==============================\n\n[PUB]\n\npostgres=# insert into troot values (1),(2),(3);\nINSERT 0 3\npostgres=# insert into troot values (11),(12),(13);\nINSERT 0 3\n\npostgres=# select * from troot;\n a\n----\n 1\n 2\n 3\n 11\n 12\n 13\n(6 rows)\n\nSTEP 3 - create a publication on the partition root\n======================================\n\n[PUB]\n\npostgres=# CREATE PUBLICATION pub1 FOR TABLE troot;\nCREATE PUBLICATION\npostgres=# \\dRp+ pub1;\n Publication pub1\n Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root\n----------+------------+---------+---------+---------+-----------+----------\n postgres | f | t | t | t | t | f\nTables:\n \"public.troot\"\n\n\nSTEP 4 - create the subscriber\n=======================\n\n[SUB]\n\npostgres=# CREATE SUBSCRIPTION sub1 CONNECTION 'host=127.0.0.1\nport=5432 dbname=postgres' PUBLICATION pub1;\nNOTICE: created replication slot \"sub1\" on publisher\nCREATE SUBSCRIPTION\npostgres=# 2021-09-15 12:45:12.224 AEST [30592] LOG: logical\nreplication apply worker for subscription \"sub1\" has started\n2021-09-15 12:45:12.236 AEST [30595] LOG: logical replication table\nsynchronization worker for subscription \"sub1\", table \"tless10\" has\nstarted\n2021-09-15 12:45:12.247 AEST [30598] LOG: logical replication table\nsynchronization worker for subscription \"sub1\", table \"tmore10\" has\nstarted\n2021-09-15 12:45:12.326 AEST [30595] LOG: logical replication table\nsynchronization worker for subscription 
\"sub1\", table \"tless10\" has\nfinished\n2021-09-15 12:45:12.332 AEST [30598] LOG: logical replication table\nsynchronization worker for subscription \"sub1\", table \"tmore10\" has\nfinished\n\npostgres=# select * from troot;\n a\n----\n 1\n 2\n 3\n 11\n 12\n 13\n(6 rows)\n\n// To this point, everything looks OK...\n\nSTEP 5 - toggle the publish_via_partition_root flag\n======================================\n\n[PUB]\n\npostgres=# alter publication pub1 set (publish_via_partition_root = true);\nALTER PUBLICATION\npostgres=# \\dRp+ pub1;\n Publication pub1\n Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root\n----------+------------+---------+---------+---------+-----------+----------\n postgres | f | t | t | t | t | t\nTables:\n \"public.troot\"\n\n// And then refresh the subscriber\n\n[SUB]\n\npostgres=# alter subscription sub1 refresh PUBLICATION;\nALTER SUBSCRIPTION\npostgres=# 2021-09-15 12:48:37.927 AEST [3861] LOG: logical\nreplication table synchronization worker for subscription \"sub1\",\ntable \"troot\" has started\n2021-09-15 12:48:37.977 AEST [3861] LOG: logical replication table\nsynchronization worker for subscription \"sub1\", table \"troot\" has\nfinished\n\n// Notice above that another tablesync worker has launched and copied\neverything again - BUG??\n\n[SUB]\n\npostgres=# select * from troot;\n a\n----\n 1\n 2\n 3\n 1\n 2\n 3\n 11\n 12\n 13\n 11\n 12\n 13\n(12 rows)\n\n// At this point if I would keep toggling the\npublish_via_partition_root then each time I do subscription REFRESH\nPUBLICATION it will copy the data yet again. 
For example,\n\n[PUB]\n\npostgres=# alter publication pub1 set (publish_via_partition_root = false);\nALTER PUBLICATION\n\n[SUB]\n\npostgres=# alter subscription sub1 refresh PUBLICATION;\nALTER SUBSCRIPTION\npostgres=# 2021-09-15 12:59:02.106 AEST [21709] LOG: logical\nreplication table synchronization worker for subscription \"sub1\",\ntable \"tless10\" has started\n2021-09-15 12:59:02.120 AEST [21711] LOG: logical replication table\nsynchronization worker for subscription \"sub1\", table \"tmore10\" has\nstarted\n2021-09-15 12:59:02.189 AEST [21709] LOG: logical replication table\nsynchronization worker for subscription \"sub1\", table \"tless10\" has\nfinished\n2021-09-15 12:59:02.207 AEST [21711] LOG: logical replication table\nsynchronization worker for subscription \"sub1\", table \"tmore10\" has\nfinished\n\nBy now the pub/sub data on each side is quite different\n==========================================\n\n[PUB]\n\npostgres=# select count(*) from troot;\n count\n-------\n 6\n(1 row)\n\n[SUB]\n\npostgres=# select count(*) from troot;\n count\n-------\n 18\n(1 row)\n\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia\n\n\n", "msg_date": "Wed, 15 Sep 2021 13:19:38 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": true, "msg_subject": "Re: PG Docs for ALTER SUBSCRIPTION REFRESH PUBLICATION - copy_data\n option" }, { "msg_contents": "On Wed, Sep 15, 2021 at 8:49 AM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n> On Tue, Sep 14, 2021 at 8:33 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > On Fri, Jun 25, 2021 at 9:20 AM Peter Smith <smithpb2250@gmail.com> wrote:\n> > >\n> > > But I recently learned that when there are partitions in the\n> > > publication, then toggling the value of the PUBLICATION option\n> > > \"publish_via_partition_root\" [3] can also *implicitly* change the list\n> > > published tables, and therefore that too might cause any ASRP to make\n> > > use of the copy_data value for those implicitly 
added\n> > > partitions/tables.\n> > >\n> >\n> > I have tried the below example in this context but didn't see any\n> > effect on changing via_root option.\n>\n> Thanks for trying to reproduce. I also thought your steps were the\n> same as what I'd previously done but it seems like it was a bit\n> different. Below are my steps to observe some unexpected COPY\n> happening. Actually, now I am no longer sure if this is just a\n> documentation issue; perhaps it is a bug.\n>\n\nYeah, this looks odd to me as well.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 15 Sep 2021 09:20:22 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: PG Docs for ALTER SUBSCRIPTION REFRESH PUBLICATION - copy_data\n option" } ]
[ { "msg_contents": "Hi all\n\nAs a result of some recent work on Windows I have a list of PGDLLIMPORTs\nI'd like to add to existing exported globals.\n\nAll affected variables are already extern, so this doesn't expose any new\nAPI not already available to non-Windows extensions.\n\nI've split the patch up for clarity:\n\n* v1-0001: PGDLLIMPORTs for xlog.c's XactLastRecEnd, ProcLastRecPtr and\nreachedConsistency . I only really need XactLastRecEnd but think it's\nsensible to expose all of them.\n\n* v1-0002: PGDLLIMPORT for struct WalRecv . Allows extensions to observe\nWAL receiver state and behaviour.\n\n* v1-0003: PGDLLIMPORT criticalSharedRelcachesBuilt and\ncriticalRelcachesBuilt . I only really need criticalSharedRelcachesBuilt\nbut it seems sensible to export both. Useful when extension code may run\nduring early startup (_PG_init in shared_preload_libraries, shmem init,\netc) and later during normal running.\n\n* v1-0004: PGDLLIMPORT a set of useful GUCs and vars containing GUC-derived\nstate in xlog.h and walreceiver.h\n\nI will follow up soon with a patch that marks every GUC as PGDLLIMPORT\nincluding any vars derived from the GUC by hooks. I don't see much point\ndoing this piecemeal since they're all externs anyway. That patch will\nreplace patch 4 above, but not patches 1-3.\n\nI'd love to see these PGDLLIMPORTs backported to pg13.", "msg_date": "Fri, 25 Jun 2021 12:51:45 +0800", "msg_from": "Craig Ringer <craig.ringer@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Adding more PGDLLIMPORTs" } ]
[ { "msg_contents": "Till now, we didn't allow to stream the changes in logical replication\ntill we receive speculative confirm or the next DML change record\nafter speculative inserts. The reason was that we never use to process\nspeculative aborts but after commit 4daa140a2f it is possible to\nprocess them so we can allow streaming once we receive speculative\nabort after speculative insertion. See attached.\n\nI think this is a minor improvement in the logical replication of\nin-progress transactions. I have verified this for speculative aborts\nand it allows streaming once we receive the spec_abort change record.\n\n-- \nWith Regards,\nAmit Kapila.", "msg_date": "Fri, 25 Jun 2021 12:24:24 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": true, "msg_subject": "Allow streaming the changes after speculative aborts." }, { "msg_contents": "On Fri, Jun 25, 2021 at 12:24 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> Till now, we didn't allow to stream the changes in logical replication\n> till we receive speculative confirm or the next DML change record\n> after speculative inserts. The reason was that we never use to process\n> speculative aborts but after commit 4daa140a2f it is possible to\n> process them so we can allow streaming once we receive speculative\n> abort after speculative insertion. See attached.\n>\n> I think this is a minor improvement in the logical replication of\n> in-progress transactions. I have verified this for speculative aborts\n> and it allows streaming once we receive the spec_abort change record.\n\nYeah, this improvement makes sense. And the patch looks fine to me.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 29 Jun 2021 12:57:06 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Allow streaming the changes after speculative aborts." 
}, { "msg_contents": "On Tue, Jun 29, 2021 at 12:57 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n>\n> On Fri, Jun 25, 2021 at 12:24 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > Till now, we didn't allow to stream the changes in logical replication\n> > till we receive speculative confirm or the next DML change record\n> > after speculative inserts. The reason was that we never use to process\n> > speculative aborts but after commit 4daa140a2f it is possible to\n> > process them so we can allow streaming once we receive speculative\n> > abort after speculative insertion. See attached.\n> >\n> > I think this is a minor improvement in the logical replication of\n> > in-progress transactions. I have verified this for speculative aborts\n> > and it allows streaming once we receive the spec_abort change record.\n>\n> Yeah, this improvement makes sense. And the patch looks fine to me.\n>\n\nThanks. Now, that the PG-15 branch is created, I think we should\ncommit this to both 15 and 14 as this is a minor change. What do you\nthink?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 30 Jun 2021 09:29:30 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Allow streaming the changes after speculative aborts." }, { "msg_contents": "On Wed, Jun 30, 2021 at 9:29 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Tue, Jun 29, 2021 at 12:57 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n> >\n> > On Fri, Jun 25, 2021 at 12:24 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > >\n> > > Till now, we didn't allow to stream the changes in logical replication\n> > > till we receive speculative confirm or the next DML change record\n> > > after speculative inserts. The reason was that we never use to process\n> > > speculative aborts but after commit 4daa140a2f it is possible to\n> > > process them so we can allow streaming once we receive speculative\n> > > abort after speculative insertion. 
See attached.\n> > >\n> > > I think this is a minor improvement in the logical replication of\n> > > in-progress transactions. I have verified this for speculative aborts\n> > > and it allows streaming once we receive the spec_abort change record.\n> >\n> > Yeah, this improvement makes sense. And the patch looks fine to me.\n> >\n>\n> Thanks. Now, that the PG-15 branch is created, I think we should\n> commit this to both 15 and 14 as this is a minor change. What do you\n> think?\n\nYeah, this is a minor improvement so can be pushed to both 15 and 14.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 30 Jun 2021 09:55:26 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Allow streaming the changes after speculative aborts." }, { "msg_contents": "On Wed, Jun 30, 2021 at 9:55 AM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n>\n> On Wed, Jun 30, 2021 at 9:29 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > On Tue, Jun 29, 2021 at 12:57 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n> > >\n> > > On Fri, Jun 25, 2021 at 12:24 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > > >\n> > > > Till now, we didn't allow to stream the changes in logical replication\n> > > > till we receive speculative confirm or the next DML change record\n> > > > after speculative inserts. The reason was that we never use to process\n> > > > speculative aborts but after commit 4daa140a2f it is possible to\n> > > > process them so we can allow streaming once we receive speculative\n> > > > abort after speculative insertion. See attached.\n> > > >\n> > > > I think this is a minor improvement in the logical replication of\n> > > > in-progress transactions. I have verified this for speculative aborts\n> > > > and it allows streaming once we receive the spec_abort change record.\n> > >\n> > > Yeah, this improvement makes sense. And the patch looks fine to me.\n> > >\n> >\n> > Thanks. 
Now, that the PG-15 branch is created, I think we should\n> > commit this to both 15 and 14 as this is a minor change. What do you\n> > think?\n>\n> Yeah, this is a minor improvement so can be pushed to both 15 and 14.\n>\n\nThanks, pushed!\n\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 30 Jun 2021 13:44:57 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Allow streaming the changes after speculative aborts." }, { "msg_contents": "On Wed, Jun 30, 2021 at 4:15 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > > Thanks. Now, that the PG-15 branch is created, I think we should\n> > > commit this to both 15 and 14 as this is a minor change. What do you\n> > > think?\n> >\n> > Yeah, this is a minor improvement so can be pushed to both 15 and 14.\n>\n> Thanks, pushed!\n\nI think if you're going to back-patch things that are arguably new\nfeatures into stable branches, you ought to give people more than 4\nhours and 16 minutes to object. That's how much time passed between\nthe proposal to back-patch and the commit getting pushed.\n\nI'm not objecting to the change as such - though someone else may wish\nto - but I'm definitely objecting to the timing of the commit.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 30 Jun 2021 08:07:55 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Allow streaming the changes after speculative aborts." }, { "msg_contents": "On Wed, Jun 30, 2021 at 5:38 PM Robert Haas <robertmhaas@gmail.com> wrote:\n>\n> On Wed, Jun 30, 2021 at 4:15 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> I'm not objecting to the change as such - though someone else may wish\n> to - but I'm definitely objecting to the timing of the commit.\n>\n\nOkay, I'll wait for more time going forward. 
Normally, I do wait but\nthis appeared straightforward to me so I went ahead.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 30 Jun 2021 17:45:08 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Allow streaming the changes after speculative aborts." } ]
[ { "msg_contents": "Dear Hackers,\n\nI checked about DECLARE STATEMENT(added from ad8305a), and I noticed that\nthis connection-control feature cannot be used for DEALLOCATE and DESCRIBE statement.\n\nI attached the patch that fixes these bugs, this contains source and test code.\n\nHow do you think? \n\nBest Regards,\nHayato Kuroda\nFUJITSU LIMITED", "msg_date": "Fri, 25 Jun 2021 12:02:22 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "At Fri, 25 Jun 2021 12:02:22 +0000, \"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com> wrote in \n> Dear Hackers,\n> \n> I checked about DECLARE STATEMENT(added from ad8305a), and I noticed that\n> this connection-control feature cannot be used for DEALLOCATE and DESCRIBE statement.\n> \n> I attached the patch that fixes these bugs, this contains source and test code.\n> \n> How do you think? \n\n(Maybe by consulting the code.. Anyway, )\n\nprepared_name is used in the follwoing statement rules.\n\nThe following commands handle the liked connection.\nDECLARE\nPREPARE\nEXECUTE\n\nThe follwoing commands don't.\nDESCRIBE\nDEALLOCATE\nDECLARE CURSOR .. FOR\nCREATE TABLE AS EXECUTE\n\nAlthough I'm not sure it is definitely a bug or not, it seems\nreasonable that the first two follow the liked connection.\n\nI'm not sure about the last two. Since ecpg doesn't allow two prepared\nstatements with the same name even if they are on different\nconnections. So the two can also follow the connection linked to the\ngiven statements. DECLARE CURSOR could be follow the liked connection\nsafely but CREATE TABLE AS EXECUTE doesn't seem safe.\n\nI'm not sure how ALLOCATE DESCRIPTOR should behave. Without \"AT conn\"\nattached, the descriptor is recorded being bound to the connection\n\"null\"(or nothing). 
GET DESCRIPTOR for the statement stmt tries to\nfind a descriptor with the same name but bound to c1, which does not\nexist.\n\nAs the result ecpg complains like this:\n\n EXEC SQL CONNECT TO 'db1@,..' AS c1;\n EXEC SQL AT c1 DECLARE stmt STATEMENT;\n EXEC SQL PREPARE stmt FROM \"...\";\n EXEC SQL ALLOCATE DESCRIPTOR desc;\n EXEC SQL DESCRIBE stmt INTO SQL DESCRIPTOR desc;\n41: EXEC SQL GET DESCRIPTOR desc VALUE 1 :name = NAME;\n\n> ecpgtest.pgc:41: WARNING: descriptor \"\"desc\"\" does not exist\n\n(Note that the warning mistakenly fires also when the physical order\nof ALLOCATE and GET DESCRIPTOR is reversed in a .pgc file.)\n\nI don't come up with an idea how to \"fix\" it (or I don't find what is\nthe sane behavior for this feature..), but anyway, I find it hard to\nfind what to do next from the warning. It might be helpful that the\nwarning shows the connection.\n\n> ecpgtest.pgc:41: WARNING: descriptor \"\"desc\"\" bound to connection \"\"c1\"\" does not exist\n\n(It looks strange that the name is quoted twice but it would be\n another issue.)\n\n\nECPGDescribe: SQL_DESCRIBE INPUT_P prepared_name using_descriptor\n \t{\n-\t\tconst char *con = connection ? connection : \"NULL\";\n+\t\tconst char *con;\n+\n+\t\tcheck_declared_list($3);\n+\t\tcon = connection ? connection : \"NULL\";\n \t\tmmerror(PARSE_ERROR, ET_WARNING, \"using unsupported DESCRIBE statement\");\n\nHonestly, I don't like the boilerplate. There's no reason for handling\nconnection at the level. 
It seems to me that it is better that the\nrule ECPGDescribe passes the parameters force_indicator and stmt name\nup to the parent rule-component (stmt:ECPGDescribe) , then the parent\ngenerates the function-call string.\n\n\nThe test portion bloats the patch so it would be easier to read if\nthat part is separated from the code part.\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Thu, 01 Jul 2021 17:48:49 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "At Thu, 01 Jul 2021 17:48:49 +0900 (JST), Kyotaro Horiguchi <horikyota.ntt@gmail.com> wrote in \n> At Fri, 25 Jun 2021 12:02:22 +0000, \"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com> wrote in \n> The following commands handle the liked connection.\n> DECLARE\n> PREPARE\n> EXECUTE\n> \n> The follwoing commands don't.\n> DESCRIBE\n> DEALLOCATE\n> DECLARE CURSOR .. FOR\n> CREATE TABLE AS EXECUTE\n\nMmm. It's wrong. CREATE TABLE AS EXECUTE follows. So DECLARE CURSOR\nshould follow?\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Thu, 01 Jul 2021 18:07:06 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Dear Horiguchi-san,\n\nThank you for replying!\n\n> (Maybe by consulting the code.. Anyway, )\n\nI noticed the patch cannot be applied...\n\n> The follwoing commands don't.\n> DESCRIBE\n> DEALLOCATE\n> DECLARE CURSOR .. 
FOR\n> CREATE TABLE AS EXECUTE\n\nI'm not sure about `CREATE TABLE AS EXECUTE`(I'll check your new thread), but at least,\nI think `DECLARE CURSOR` uses linked connection.\n\nThe following .pgc code:\n\n```pgc\n EXEC SQL CONNECT TO postgres AS connection1;\n EXEC SQL CONNECT TO template1 AS connection2;\n EXEC SQL SET CONNECTION TO connection2;\n\n EXEC SQL AT connection1 DECLARE sql STATEMENT;\n EXEC SQL PREPARE sql FROM \"SELECT current_database()\";\n\n EXEC SQL DECLARE cur CURSOR FOR sql;\n EXEC SQL OPEN cur;\n```\n\nwill become like this(picked only last two lines):\n\n```c\n /* declare cur cursor for $1 */\n\n { ECPGdo(__LINE__, 0, 1, \"connection1\", 0, ECPGst_normal, \"declare cur cursor for $1\", \n ECPGt_char_variable,(ECPGprepared_statement(\"connection1\", \"sql\", __LINE__)),(long)1,(long)1,(1)*sizeof(char), \n ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);}\n```\n\nOf cause, according to [1], the connection is overwritten by check_declared_list()\nand it's saved to this->connection.\nPlease tell me if I misunderstand something.\n\n> I'm not sure how ALLOCATE DESCRIPTOR should behave. Without \"AT conn\"\n> attached, the descriptor is recorded being bound to the connection\n> \"null\"(or nothing). GET DESCRIPTOR for the statement stmt tries to\n> find a descriptor with the same name but bound to c1, which does not\n> exist.\n\nRight. lookup_descriptor() will throw mmerror().\n\n> I don't come up with an idea how to \"fix\" it (or I don't find what is\n> the sane behavior for this feature..), but anyway, I find it hard to\n> find what to do next from the warning. It might be helpful that the\n> warning shows the connection.\n\nI think this phenomenon is quite normal, not bug. 
When users use connection-associated\nprepared_name, it implies using AT clause.\nHowever, I perfectly agree that it's very difficult for users to find a problem from the message.\nI will try to add information to it in the next patch.\n\n> Honestly, I don't like the boilerplate. There's no reason for handling\n> connection at the level. It seems to me that it is better that the\n> rule ECPGDescribe passes the parameters force_indicator and stmt name\n> up to the parent rule-component (stmt:ECPGDescribe) , then the parent\n> generates the function-call string.\n\nYou're right. This is very stupid program. I'll combine them into one.\n\n> The test portion bloats the patch so it would be easier to read if\n> that part is separated from the code part.\n\nRight, I'll separate and attach again few days. Sorry for inconvenience;-(.\n\n[1]: https://github.com/postgres/postgres/blob/master/src/interfaces/ecpg/preproc/ecpg.trailer#L345\n\nBest Regards,\nHayato Kuroda\nFUJITSU LIMITED\n\n\n\n", "msg_date": "Thu, 1 Jul 2021 12:55:04 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com> writes:\n>> The test portion bloats the patch so it would be easier to read if\n>> that part is separated from the code part.\n\n> Right, I'll separate and attach again few days. 
Sorry for inconvenience;-(.\n\nPlease also ensure that you're generating the patch against git HEAD.\nThe cfbot shows it as failing to apply, likely because you're looking\nat something predating ad8305a43d1890768a613d3fb586b44f17360f29.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 01 Jul 2021 12:57:38 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Dear Hackers,\n\nI revised my patch.\n\n> Please also ensure that you're generating the patch against git HEAD.\n> The cfbot shows it as failing to apply, likely because you're looking\n> at something predating ad8305a43d1890768a613d3fb586b44f17360f29.\n\nMaybe there was something wrong with my local environment. Sorry.\n\n> However, I perfectly agree that it's very difficult for users to find a problem from the message.\n> I will try to add information to it in the next patch.\n\nI added such a message and some tests, but I began to think this is strange.\nNow I'm wondering why the connection is checked in some DESCRIPTOR-related\nstatements? In my understanding connection name is not used in ECPGallocate_desc(),\nECPGdeallocate_desc(), ECPGget_desc() and so on.\nHence I think lookup_descriptor() and drop_descriptor() can be removed.\nThis idea can solve your first argument.\n\n> You're right. This is very stupid program. 
I'll combine them into one.\n\nCheck_declared_list() was moved to stmt:ECPGDescribe rule.\nSome similar rules still remain in ecpg.trailer, but INPUT/OUTPUT statements have\ndifferent rules and actions and I cannot combine well.\n\nBest Regards,\nHayato Kuroda\nFUJITSU LIMITED", "msg_date": "Fri, 2 Jul 2021 12:53:02 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Fri, Jul 02, 2021 at 12:53:02PM +0000, kuroda.hayato@fujitsu.com wrote:\n> I added such a message and some tests, but I began to think this is strange.\n> Now I'm wondering why the connection is checked in some DESCRIPTOR-related\n> statements? In my understanding connection name is not used in ECPGallocate_desc(),\n> ECPGdeallocate_desc(), ECPGget_desc() and so on.\n> Hence I think lookup_descriptor() and drop_descriptor() can be removed.\n> This idea can solve your first argument.\n\nI have been chewing on this comment and it took me some time to\nunderstand what you meant here. It is true that the ecpglib part, aka\nall the routines you are quoting above, don't rely at all on the\nconnection names. However, the preprocessor warnings generated by\ndrop_descriptor() and lookup_descriptor() seem useful to me to get\ninformed when doing incorrect descriptor manipulations, say on\ndescriptors that refer to incorrect object names. 
So I would argue\nfor keeping these.\n\n0002 includes the following diffs:\n\n-[NO_PID]: raising sqlcode -230 on line 111: invalid statement name \"stmt_2\" on line 111\n-[NO_PID]: sqlca: code: -230, state: 26000\n-SQL error: invalid statement name \"stmt_2\" on line 111\n+[NO_PID]: deallocate_one on line 111: name stmt_2\n+[NO_PID]: sqlca: code: 0, state: 00000\n[...]\n-[NO_PID]: raising sqlcode -230 on line 135: invalid statement name \"stmt_3\" on line 135\n-[NO_PID]: sqlca: code: -230, state: 26000\n-SQL error: invalid statement name \"stmt_3\" on line 135\n+[NO_PID]: deallocate_one on line 135: name stmt_3\n+[NO_PID]: sqlca: code: 0, state: 00000\n\nAnd indeed, I would have expected those queries introduced by ad8305a\nto pass. So a backpatch down to v14 looks adapted.\n\nI am going to need more time to finish evaluating this patch, but it\nseems that this moves to the right direction. The new warnings for\nlookup_descriptor() and drop_descriptor() with the connection name are\nuseful. Should we have more cases with con2 in the new set of tests\nfor DESCRIBE? \n--\nMichael", "msg_date": "Tue, 6 Jul 2021 16:58:03 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Tue, Jul 06, 2021 at 04:58:03PM +0900, Michael Paquier wrote:\n> I have been chewing on this comment and it took me some time to\n> understand what you meant here. It is true that the ecpglib part, aka\n> all the routines you are quoting above, don't rely at all on the\n> connection names. However, the preprocessor warnings generated by\n> drop_descriptor() and lookup_descriptor() seem useful to me to get\n> informed when doing incorrect descriptor manipulations, say on\n> descriptors that refer to incorrect object names. 
So I would argue\n> for keeping these.\n\nBy the way, as DECLARE is new as of v14, I think that the interactions\nbetween DECLARE and the past queries qualify as an open item. I am\nadding Michael Meskes in CC. I got to wonder how much of a\ncompatibility break it would be for DEALLOCATE and DESCRIBE to handle\nEXEC SQL AT in a way more consistent than DECLARE, even if these are\nbounded to a result set, and not a connection.\n--\nMichael", "msg_date": "Tue, 6 Jul 2021 17:04:15 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "At Fri, 2 Jul 2021 12:53:02 +0000, \"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com> wrote in \n> Dear Hackers,\n> \n> I revised my patch.\n\nThanks for the new version.\n\n> > However, I perfectly agree that it's very difficult for users to find a problem from the message.\n> > I will try to add information to it in the next patch.\n> \n> I added such a message and some tests, but I began to think this is strange.\n> Now I'm wondering why the connection is checked in some DESCRIPTOR-related\n> statements? In my understanding connection name is not used in ECPGallocate_desc(),\n> ECPGdeallocate_desc(), ECPGget_desc() and so on.\n> Hence I think lookup_descriptor() and drop_descriptor() can be removed.\n> This idea can solve your first argument.\n\nMaybe (pre)compile-time check is needed for the descriptor names.\nOtherwise we don't notice some of the names are spelled wrongly until\nruntime. If it were a string we can live without the check but it is\nseemingly an identifier so it is strange that it is not detected at\ncompile-time. I guess that it is the motivation for the check.\n\nWhat makes the story complex is that connection matters in the\nrelation between DESCRIBE and GET DESCRIPTOR. (However, connection\ndoesn't matter in ALLOCATE DESCRIPTOR..) 
Maybe the check involves\nconnection for this reason.\n\nSince we don't allow descriptors with the same name even if they are\nfor the different connections, I think we can set the current\nconnection if any (which is set either by AT option or statement-bound\none) to i->connection silently if i->connection is NULL in\nlookup_descriptor(). What do you think about this?\n\n=====\nI noticed the following behavior.\n\n> EXEC SQL AT conn1 DECLARE stmt STATEMENT;\n> EXEC SQL DESCRIBE stmt INTO SQL DESCRIPTOR mydesc;\n> EXEC SQL SET CONNECTION conn2;\nERROR: AT option not allowed in SET CONNECTION STATEMENT\n\nconnection is \"conn1\" at the error time. The parser relies on\noutput_statement and friends for connection name reset. So the rules\nthat don't call the functions need to reset it by themselves.\n\nSimilary, the following sequence doesn't yield an error, which is\nexpected.\n\n> EXEC SQL AT conn1 DECLARE stmt STATEMENT;\n> EXEC SQL AT conn2 EXECUTE stmt INTO ..;\n\nIn this case \"conn2\" set by the AT option is silently overwritten with\n\"conn1\" by check_declared_list(). I think we should reject AT option\n(with a different connection) in that case.\n\n\n\n> > You're right. This is very stupid program. 
I'll combine them into one.\n> \n> Check_declared_list() was moved to stmt:ECPGDescribe rule.\n> Some similar rules still remain in ecpg.trailer, but INPUT/OUTPUT statements have\n> different rules and actions and I cannot combine well.\n\nI'm not sure what you exactly mean by the \"INPUT/OUTPUT statements\"\nbut the change of DESCRIBE INPUT/OUTPUT looks fine to me.\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Tue, 06 Jul 2021 17:29:27 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Dear Michael,\n\n> I have been chewing on this comment and it took me some time to\n> understand what you meant here.\n\nSorry... But your understanding is correct.\n\n> It is true that the ecpglib part, aka\n> all the routines you are quoting above, don't rely at all on the\n> connection names. However, the preprocessor warnings generated by\n> drop_descriptor() and lookup_descriptor() seem useful to me to get\n> informed when doing incorrect descriptor manipulations, say on\n> descriptors that refer to incorrect object names. So I would argue\n> for keeping these.\n\nThank you for giving your argument. I will keep in the next patch.\n\n> And indeed, I would have expected those queries introduced by ad8305a\n> to pass. So a backpatch down to v14 looks adapted.\n\nYeah. I think, at least, DEALLOCATE statement should use the associated connection.\n\n\n> I am going to need more time to finish evaluating this patch, but it\n> seems that this moves to the right direction. The new warnings for\n> lookup_descriptor() and drop_descriptor() with the connection name are\n> useful. Should we have more cases with con2 in the new set of tests\n> for DESCRIBE? \n\nThanks. 
OK, I'll add them to it.\n\n> By the way, as DECLARE is new as of v14, I think that the interactions\n> between DECLARE and the past queries qualify as an open item. I am\n> adding Michael Meskes in CC. I got to wonder how much of a\n> compatibility break it would be for DEALLOCATE and DESCRIBE to handle\n> EXEC SQL AT in a way more consistent than DECLARE, even if these are\n> bounded to a result set, and not a connection.\n\nI already said above, I think that DEALLOCATE statement should\nfollow the linked connection, but I cannot decide about DESCRIBE.\nI want to ask how do you think.\n\nBest Regards,\nHayato Kuroda\nFUJITSU LIMITED\n\n\n\n", "msg_date": "Thu, 8 Jul 2021 11:42:14 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Thu, Jul 08, 2021 at 11:42:14AM +0000, kuroda.hayato@fujitsu.com wrote:\n> I already said above, I think that DEALLOCATE statement should\n> follow the linked connection, but I cannot decide about DESCRIBE.\n> I want to ask how do you think.\n\nI am not completely sure. It would be good to hear from Michael\nMeskes about that, and the introduction of DECLARE makes the barrier\nabout the use of preferred connections blurrier for those other\nqueries.\n--\nMichael", "msg_date": "Fri, 9 Jul 2021 11:59:43 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Dear Horiguchi-san,\n\nThank you for reviewing! I attached new version.\nSorry for delaying reply.\n\n> Since we don't allow descriptors with the same name even if they are\n> for the different connections, I think we can set the current\n> connection if any (which is set either by AT option or statement-bound\n> one) to i->connection silently if i->connection is NULL in\n> lookup_descriptor(). 
What do you think about this?\n\nI tried to implement. Is it correct?\n\n> connection is \"conn1\" at the error time. The parser relies on\n> output_statement and friends for connection name reset. So the rules\n> that don't call the functions need to reset it by themselves.\n\nOh, I didn't notice that. Fixed.\nI'm wondering why a output function is not implemented, like output_describe_statement(),\nbut anyway I put a connection reset in ecpg.addons.\n\n> Similary, the following sequence doesn't yield an error, which is\n> expected.\n> \n> > EXEC SQL AT conn1 DECLARE stmt STATEMENT;\n> > EXEC SQL AT conn2 EXECUTE stmt INTO ..;\n> \n> In this case \"conn2\" set by the AT option is silently overwritten with\n> \"conn1\" by check_declared_list(). I think we should reject AT option\n> (with a different connection) in that case.\n\nActually this comes from Oracle's specification. Pro*C precompiler\noverwrite their connection in the situation, hence I followed that.\nBut I agree this might be confused and I added the warning report.\nHow do you think? Is it still strange?\n\nBest Regards,\nHayato Kuroda\nFUJITSU LIMITED", "msg_date": "Mon, 12 Jul 2021 04:05:21 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Hi kuroda-san:\n\nI find another problem about declare statement. The test source looks like:\n> EXEC SQL AT con1 DECLARE stmt STATEMENT;\n> EXEC SQL PREPARE stmt AS SELECT version();\n> EXEC SQL DECLARE cur CURSOR FOR stmt;\n> EXEC SQL WHENEVER SQLERROR STOP;\n\nThe outout about ecpg:\n>test.pgc:14: ERROR: AT option not allowed in WHENEVER statement\n\nAfter a simple research, I found that after calling function check_declared_list,\nthe variable connection will be updated, but in some case(e.g. 
ECPGCursorStmt)\nreset connection is missing.\n\nI'm not sure, but how about modify the ecpg.trailer:\n> tatement: ecpgstart at toplevel_stmt ';' { connection = NULL; }\n> | ecpgstart toplevel_stmt ';'\n\nI think there are lots of 'connection = NULL;' in source, and we should find a \ngood location to reset the connection.\n\n\nBest regards.\nShenhao Wang\n\n\n-----Original Message-----\nFrom: kuroda.hayato@fujitsu.com <kuroda.hayato@fujitsu.com> \nSent: Monday, July 12, 2021 12:05 PM\nTo: 'Kyotaro Horiguchi' <horikyota.ntt@gmail.com>\nCc: pgsql-hackers@lists.postgresql.org\nSubject: RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE\n\nDear Horiguchi-san,\n\nThank you for reviewing! I attached new version.\nSorry for delaying reply.\n\n> Since we don't allow descriptors with the same name even if they are\n> for the different connections, I think we can set the current\n> connection if any (which is set either by AT option or statement-bound\n> one) to i->connection silently if i->connection is NULL in\n> lookup_descriptor(). What do you think about this?\n\nI tried to implement. Is it correct?\n\n> connection is \"conn1\" at the error time. The parser relies on\n> output_statement and friends for connection name reset. So the rules\n> that don't call the functions need to reset it by themselves.\n\nOh, I didn't notice that. Fixed.\nI'm wondering why a output function is not implemented, like output_describe_statement(),\nbut anyway I put a connection reset in ecpg.addons.\n\n> Similary, the following sequence doesn't yield an error, which is\n> expected.\n> \n> > EXEC SQL AT conn1 DECLARE stmt STATEMENT;\n> > EXEC SQL AT conn2 EXECUTE stmt INTO ..;\n> \n> In this case \"conn2\" set by the AT option is silently overwritten with\n> \"conn1\" by check_declared_list(). I think we should reject AT option\n> (with a different connection) in that case.\n\nActually this comes from Oracle's specification. 
Pro*C precompiler\noverwrite their connection in the situation, hence I followed that.\nBut I agree this might be confused and I added the warning report.\nHow do you think? Is it still strange?\n\nBest Regards,\nHayato Kuroda\nFUJITSU LIMITED\n\n\n\n", "msg_date": "Tue, 20 Jul 2021 08:02:44 +0000", "msg_from": "\"wangsh.fnst@fujitsu.com\" <wangsh.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Hello, Kuroda-san.\n\nAt Mon, 12 Jul 2021 04:05:21 +0000, \"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com> wrote in \n> > Similary, the following sequence doesn't yield an error, which is\n> > expected.\n> > \n> > > EXEC SQL AT conn1 DECLARE stmt STATEMENT;\n> > > EXEC SQL AT conn2 EXECUTE stmt INTO ..;\n> > \n> > In this case \"conn2\" set by the AT option is silently overwritten with\n> > \"conn1\" by check_declared_list(). I think we should reject AT option\n> > (with a different connection) in that case.\n> \n> Actually this comes from Oracle's specification. Pro*C precompiler\n> overwrite their connection in the situation, hence I followed that.\n> But I agree this might be confused and I added the warning report.\n> How do you think? Is it still strange?\n\n(I'm perplexed from what is done while precompilation and what is done\n at execution time...)\n\nHow Pro*C behaves in that case? If the second command ends with an\nerror, I think we are free to error out the second command before\nexecution. If it works... do you know what is happening at the time?\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Wed, 21 Jul 2021 17:22:47 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "All,\n\n> between DECLARE and the past queries qualify as an open item.  I am\n> adding Michael Meskes in CC. 
 I got to wonder how much of a\n> compatibility break it would be for DEALLOCATE and DESCRIBE to handle\n> EXEC SQL AT in a way more consistent than DECLARE, even if these are\n> bounded to a result set, and not a connection.\n\nI just wanted to let you know that I'm well aware of this thread but\nneed to get through my backlog before I can comment. Sorry for the\ndelay.\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org", "msg_date": "Thu, 29 Jul 2021 11:22:37 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Thu, Jul 29, 2021 at 12:22 PM Michael Meskes <meskes@postgresql.org> wrote:\n> I just wanted to let you know that I'm well aware of this thread but\n> need to get through my backlog before I can comment. Sorry for the\n> delay.\n\nThe RMT discussed this recently. We are concerned about this issue,\nincluding how it has been handled so far.\n\nIf you're unable to resolve the issue (or at least commit time to\nresolving the issue) then the RMT will call for the revert of the\noriginal feature patch.\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Sat, 31 Jul 2021 01:01:44 +0300", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Fri, Jul 30, 2021 at 3:01 PM Peter Geoghegan <pg@bowt.ie> wrote:\n> The RMT discussed this recently. 
We are concerned about this issue,\n> including how it has been handled so far.\n>\n> If you're unable to resolve the issue (or at least commit time to\n> resolving the issue) then the RMT will call for the revert of the\n> original feature patch.\n\nThe RMT continues to be concerned about the lack of progress on this\nopen item, especially the lack of communication from Michael Meskes,\nthe committer of the original patch (commit ad8305a). We are prepared\nto exercise our authority to resolve open items directly. The only\nfallback option available to us is a straight revert of commit\nad8305a.\n\nWe ask that Michael Meskes give a status update here no later than\n23:59 on Fri 13 Aug (\"anywhere on earth\" timezone). This update should\ninclude a general assessment of the problem, a proposed resolution\n(e.g., committing the proposed patch from Hayato Kuroda), and an\nestimate of when we can expect the problem to be fully resolved. If\nMichael is unable to provide a status update by that deadline, or if\nMichael is unable to commit to a reasonably prompt resolution for this\nopen item, then the RMT will call for a revert without further delay.\n\nThe RMT's first responsibility is to ensure that PostgreSQL 14 is\nreleased on schedule. We would prefer to avoid a revert, but we cannot\nallow this to drag on indefinitely.\n\n--\nPeter Geoghegan\n\n\n", "msg_date": "Fri, 6 Aug 2021 18:30:44 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Hi Peter,\n\n> The RMT continues to be concerned about the lack of progress on this\n> open item, especially the lack of communication from Michael Meskes,\n> the committer of the original patch (commit ad8305a). We are prepared\n> to exercise our authority to resolve open items directly. 
The only\n> fallback option available to us is a straight revert of commit\n> ad8305a.\n> \n> We ask that Michael Meskes give a status update here no later than\n> 23:59 on Fri 13 Aug (\"anywhere on earth\" timezone). This update\n> should\n> include a general assessment of the problem, a proposed resolution\n> (e.g., committing the proposed patch from Hayato Kuroda), and an\n> estimate of when we can expect the problem to be fully resolved. If\n> Michael is unable to provide a status update by that deadline, or if\n> Michael is unable to commit to a reasonably prompt resolution for\n> this\n> open item, then the RMT will call for a revert without further delay.\n> \n> The RMT's first responsibility is to ensure that PostgreSQL 14 is\n> released on schedule. We would prefer to avoid a revert, but we\n> cannot\n> allow this to drag on indefinitely.\n\nI get it that the goal is to release PostgreSQL 14 and I also get it\nthat nobody is interested in the reasons for my slow reaction. I even,\nat least to an extend, understand why nobody tried reaching out to me\ndirectly. However, what I cannot understand at all is the tone of this\nemail. Is this the new way of communication in the PostgreSQL project?\n\nJust to be more precise, I find it highly offensive that you address an\nemail only to me (everyone else was on CC) and yet do not even try to \ntalk to me, but instead talk about me as a third person. I find this\nvery disrespectful. 
\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Sat, 07 Aug 2021 10:13:30 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Sat, Aug 7, 2021 at 1:13 AM Michael Meskes <meskes@postgresql.org> wrote:\n> I get it that the goal is to release PostgreSQL 14 and I also get it\n> that nobody is interested in the reasons for my slow reaction. I even,\n> at least to an extend, understand why nobody tried reaching out to me\n> directly.\n\nThat's simply not true. Andrew Dunstan reached out personally and got\nno response. He then reached out through a backchannel (a direct\ncoworker of yours), before finally getting a single terse response\nfrom you here.\n\nEvery one of us has a life outside of PostgreSQL. An individual\ncontributor may not be available, even for weeks at a time. It\nhappens. The RMT might well have been much more flexible if you\nengaged with us privately. There has not been a single iota of\ninformation for us to go on. That's why this happened.\n\n> However, what I cannot understand at all is the tone of this\n> email. Is this the new way of communication in the PostgreSQL project?\n\nThe tone was formal and impersonal because it represented the position\nof the RMT as a whole (not me personally), and because it's a\nparticularly serious matter for the RMT. It concerned the RMT\nexercising its authority to resolve open items directly, in this case\nby calling for a revert. This is the option of last resort for us, and\nit was important to clearly signal that we had reached that point.\n\nNo other committer (certainly nobody on the RMT) knows anything about\necpg. 
How much longer were you expecting us to wait for a simple\nstatus update?\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Sat, 7 Aug 2021 11:00:24 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> That's simply not true. Andrew Dunstan reached out personally and got\n> no response. He then reached out through a backchannel (a direct\n> coworker of yours), before finally getting a single terse response\n> from you here.\n\nYou do know that I did not receive any email from Andrew. After all I\nexplained this to the backchannel you mentioned. I do not know what\nhappened, I do not even know if it was one email or several, but I\nchecked everything, there simply is no such email in my mailbox. \n\n> Every one of us has a life outside of PostgreSQL. An individual\n> contributor may not be available, even for weeks at a time. It\n> happens. The RMT might well have been much more flexible if you\n> engaged with us privately. There has not been a single iota of\n> information for us to go on. That's why this happened.\n\nAgain, I didn't know the RMT was expecting anything from me. Yes, I\nknew I needed to spend some time on a technical issues, but that's\nexactly the information I had at the time.\n\n> \n> The tone was formal and impersonal because it represented the\n> position\n> of the RMT as a whole (not me personally), and because it's a\n> particularly serious matter for the RMT. It concerned the RMT\n> exercising its authority to resolve open items directly, in this case\n> by calling for a revert. This is the option of last resort for us,\n> and\n> it was important to clearly signal that we had reached that point.\n\nPlease read my prior email completely, I did go into detail about what\nI meant with tone. I don't mind a formal wording and I completely agree\nthat a decision has to be made at some point. 
I was wrong in thinking\nthere was more time left, but that's also not the point. The point is\nthat you talk *about* me in the third person in an email you address at\nme. It might be normal for you, but in my neck of the woods this is\nvery rude behavior. \n\n> No other committer (certainly nobody on the RMT) knows anything about\n> ecpg. How much longer were you expecting us to wait for a simple\n> status update?\n\nWhere did I say I expect you to wait? How could I even do that given\nthat I didn't even know you were waiting for a status update from me?\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Sat, 07 Aug 2021 21:43:15 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "\nOn 8/7/21 3:43 PM, Michael Meskes wrote:\n>\n>> No other committer (certainly nobody on the RMT) knows anything about\n>> ecpg. How much longer were you expecting us to wait for a simple\n>> status update?\n> Where did I say I expect you to wait? How could I even do that given\n> that I didn't even know you were waiting for a status update from me?\n>\n\nMichael,\n\n\nDuring the Beta period, every open item is the responsibility of the\nrelevant committer. There is an expectation that each item will be dealt\nwith in a timely fashion. It is the RMT's responsibility to monitor the\nopen items list and take action if any item on the list endangers the\ntiming or stability of the release.\n\nIn the present instance all we have had from you is a terse statement\nthat you were under pressure of work, with the implication that you\nwould deal with the item in an unspecified way at an unspecified time in\nthe future. We don't think that meets the requirements I stated above.\n\nW.r.t. 
previous contact regarding this item, I sent you email on July\n23rd. It was addressed to  <mailto:meskes@postgresql.org> and had this text:\n\n> Michael,\n>\n>\n> This is an open item for release 14. Please let us know if you are going\n> to attend to it.\n>\n\nOther committers with items on the list can probably testify to the fact\nthat we have dropped them similar notes via email or messenger app, so\nyou're certainly not being singled out.\n\nPeter followed up to your eventual note on the list on the 30th. So\nwe've taken substantial steps to make you aware of what is expected.\n\n\ncheers\n\n\nandrew\n\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Sat, 7 Aug 2021 16:49:18 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Sat, Aug 7, 2021 at 12:43 PM Michael Meskes <meskes@postgresql.org> wrote:\n> Again, I didn't know the RMT was expecting anything from me. Yes, I\n> knew I needed to spend some time on a technical issues, but that's\n> exactly the information I had at the time.\n\nAs Andrew mentioned, I sent you an email on the 30th -- a full week\nprior to the email that formally timeboxed this open item. That\nearlier email is here:\n\nhttps://postgr.es/m/CAH2-Wzk=QxtSp0H5EKV92EH0u22HVMQLHGwYP4_FA3yTiEi9Yg@mail.gmail.com\n\nI really don't know why you're surprised that the issue came to a head\nwith yesterday's email. This earlier email was similar in tone, and\nyet went completely unanswered for a full week. This situation has\nbeen steadily escalating for quite a while now.\n\n> Please read my prior email completely, I did go into detail about what\n> I meant with tone. I don't mind a formal wording and I completely agree\n> that a decision has to be made at some point. I was wrong in thinking\n> there was more time left, but that's also not the point. 
The point is\n> that you talk *about* me in the third person in an email you address at\n> me. It might be normal for you, but in my neck of the woods this is\n> very rude behavior.\n\nI also talked about the RMT in the third person. My intent was to make\nthe message legalistic and impersonal. That's what is driving our\nthinking on this -- the charter of the RMT.\n\nThe RMT primarily exists to resolve open items that risk holding up\nthe release. When any committer of any patch simply doesn't respond in\nany substantive way to the RMT (any RMT), the RMT is all but forced to\nfall back on the crude option of reverting the patch. I cannot imagine\nany other outcome if other individuals were involved, or if the\ndetails were varied.\n\nWe're all volunteers, just like you. I happen to be a big believer in\nour culture of personal ownership and personal responsibility. But you\nsimply haven't engaged with us at all.\n\n> Where did I say I expect you to wait? How could I even do that given\n> that I didn't even know you were waiting for a status update from me?\n\nYou didn't say anything at all, which speaks for itself. And makes it\nimpossible for us to be flexible.\n\n--\nPeter Geoghegan\n\n\n", "msg_date": "Sat, 7 Aug 2021 15:31:49 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Sat, 2021-08-07 at 15:31 -0700, Peter Geoghegan wrote:\n> On Sat, Aug 7, 2021 at 12:43 PM Michael Meskes\n> <meskes@postgresql.org> wrote:\n> > Again, I didn't know the RMT was expecting anything from me. Yes, I\n> > knew I needed to spend some time on a technical issues, but that's\n> > exactly the information I had at the time.\n> \n> As Andrew mentioned, I sent you an email on the 30th -- a full week\n> prior to the email that formally timeboxed this open item. 
That\n> earlier email is here:\n> \n> https://postgr.es/m/CAH2-Wzk=QxtSp0H5EKV92EH0u22HVMQLHGwYP4_FA3yTiEi9Yg@mail.gmail.com\n\nThis email said nothing about sending a status update or a deadline or\nany question at all, only that you'd revert the patch if I was unable\nto resolve the issue. So what's your point? \n\n\n> I also talked about the RMT in the third person. My intent was to\n> make\n\nSo? It's okay to disrespect a person if you mention the team that you\nare representing in the third person, too?\n\n> the message legalistic and impersonal. That's what is driving our\n> thinking on this -- the charter of the RMT.\n\nPlease show me where the charter says that disrespecting a person is\nfine. And while we're add it, does the code of conduct say anything\nabout the way people should be treated? \n\n> We're all volunteers, just like you. I happen to be a big believer in\n> our culture of personal ownership and personal responsibility. But\n> you\n> simply haven't engaged with us at all.\n\nWhich I tried to explain several times, but apparently not well enough.\nLet me give you a short rundown from my perspective:\n\n- A patch is sent that I mistakenly thought was a new feature and thus\ndid not apply time too immediately.\n- After a while I get an email from you as spokesperson of the RMT that\nif this is not fixed it'll have to be reverted eventually.\n- I learn that Andrew tried to reach me. Again, I believe you Andrew,\nthat you sent the email, but I never saw it. Whether it's some\nfiltering or a user error that made it disappear, I have no idea, but\nI'm surely sorry about that.\n- I receive that email we keep talking about, the one in which you\ntreat me like I'm not even worth being addressed.\n\n> You didn't say anything at all, which speaks for itself. And makes it\n> impossible for us to be flexible.\n\nWhich flexibility did I ask for? It'd be nice if you only accused me of\nthings I did.\n\nHonestly I do not understand you at all. 
You keep treating me like I\nwas asking for anything unreasonable while I'm only trying to explain\nwhy I didn't act earlier. The only issue I have is the rude treatment\nyou gave me. \n\nJust for the record, of course I'm going to look into the issue before\nyour deadline and will send a status update.\n\nMichael\n\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Sun, 08 Aug 2021 20:34:53 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Sun, Aug 8, 2021 at 11:34 AM Michael Meskes <meskes@postgresql.org> wrote:\n> > https://postgr.es/m/CAH2-Wzk=QxtSp0H5EKV92EH0u22HVMQLHGwYP4_FA3yTiEi9Yg@mail.gmail.com\n>\n> This email said nothing about sending a status update or a deadline or\n> any question at all, only that you'd revert the patch if I was unable\n> to resolve the issue. So what's your point?\n\nI think that it's crystal clear what I meant in the email of July 30.\nI meant: it's not okay that you're simply ignoring the RMT. You hadn't\neven made a token effort at that point. For example you didn't give\nthe proposed fix a cursory 15 minute review, just so we had some very\nrough idea of where things stand. You still haven't.\n\nMy understanding of what you're taking issue with (perhaps a flawed\nunderstanding) is that you think that you have been treated unfairly\nor arbitrarily in general. That's why I brought up the email of July\n30 yesterday. 
So my point was: no, you haven't been treated unfairly.\nIf you only take issue with the specific tone and tenor of my email\nfrom Friday (the email that specified a deadline), and not the content\nitself, then maybe the timeline and the wider context are not so\nimportant.\n\nI am still unsure about whether your concern is limited to the tone of\nthe email from Friday, or if you also take exception to the content of\nthat email (and the wider context).\n\n> > I also talked about the RMT in the third person. My intent was to\n> > make\n>\n> So? It's okay to disrespect a person if you mention the team that you\n> are representing in the third person, too?\n\nPerhaps the tone of my email from Friday was unhelpful. Even still, I\nam surprised that you seem to think that it was totally outrageous --\nespecially given the context. It was the first email that you\nresponded to *at all* on this thread, with the exception of your\noriginal terse response. I am not well practised in communicating with\na committer that just doesn't engage with the RMT at all. This is all\nnew to me. I admit that I found it awkward to write the email for my\nown reasons.\n\n> > You didn't say anything at all, which speaks for itself. And makes it\n> > impossible for us to be flexible.\n>\n> Which flexibility did I ask for? It'd be nice if you only accused me of\n> things I did.\n\nI brought up flexibility to point out that this could have been\navoided. 
If you needed more time, why didn't you simply ask for it?\n\nAgain, the scope of what you're complaining about was (and still is)\nunclear to me.\n\n> Just for the record, of course I'm going to look into the issue before\n> your deadline and will send a status update.\n\nThank you.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Sun, 8 Aug 2021 12:48:44 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> I think that it's crystal clear what I meant in the email of July 30.\n> I meant: it's not okay that you're simply ignoring the RMT. You\n> hadn't\n> even made a token effort at that point. For example you didn't give\n> the proposed fix a cursory 15 minute review, just so we had some very\n> rough idea of where things stand. You still haven't.\n\nHow do you know I didn't spend 15 minutes looking at the patch and the\nwhole email thread? I surely did and it was more than 15 minutes, but\nnot enough to give a meaningful comment. If you can do it in 15\nminutes, great for you, I cannot.\n\nThe meaning of your email of July 30 was crystal clear, yes. It means\nyou'd revert the patch if I didn't resolve the issue. This is literally\nwhat it says. If you meant to say \"It's not okay that you're simply\nignoring the RMT. You hadn't even made a token effort at that point.\"\nit might have been helpful if you said that, instead of having me guess\nif there was a hidden meaning in your email.\n\nBesides, I have not ignored the RMT. I don't know why you keep\ninsisting on something that is simply not true.\n\n> My understanding of what you're taking issue with (perhaps a flawed\n> understanding) is that you think that you have been treated unfairly\n> or arbitrarily in general. That's why I brought up the email of July\n> 30 yesterday. So my point was: no, you haven't been treated unfairly.\n\nYes, this is a flawed understanding. 
I'm sorry you came to that\nunderstanding, I though my emails were pretty clear as to what I was\nobjecting to.\n\n> If you only take issue with the specific tone and tenor of my email\n> from Friday (the email that specified a deadline), and not the\n> content\n> itself, then maybe the timeline and the wider context are not so\n> important.\n> \n> I am still unsure about whether your concern is limited to the tone\n> of\n> the email from Friday, or if you also take exception to the content\n> of\n> that email (and the wider context).\n\nAt the risk of repeating myself, my concern is *only* the rude and\ndisrespectful way of addressing me in the third person while talking to\nme directly. Again, I though I made that clear in my first email about\nthe topic and every one that followed.\n\n> Perhaps the tone of my email from Friday was unhelpful. Even still, I\n> am surprised that you seem to think that it was totally outrageous --\n> especially given the context. It was the first email that you\n\nThe context never makes a derogative communication okay, at least not\nin my opinion.\n\n> responded to *at all* on this thread, with the exception of your\n> original terse response. I am not well practised in communicating\n> with\n> a committer that just doesn't engage with the RMT at all. This is all\n> new to me. I admit that I found it awkward to write the email for my\n> own reasons.\n\nI was *never* asked for *any* response in *any* email, save the\noriginal technical discussion, which I did answer with telling people\nthat I'm lacking time but will eventually get to it. Just to be\nprecise, your email from July 30 told me and everyone how this will be\nhandled. A reasonable procedure, albeit not one we'd like to see\nhappen. But why should I answer and what? It's not that you bring this\nup as a discussion point, but as a fact.\n\n> I brought up flexibility to point out that this could have been\n> avoided. 
If you needed more time, why didn't you simply ask for it?\n\nThe first conversation that brought up the time issue was your email\nthat started this thread. There you set a deadline which I understand\nand accept. But then I never said a word against it, so the question\nremains, why accusing me of something I never did?\n\n> Again, the scope of what you're complaining about was (and still is)\n> unclear to me.\n\nI'm sorry, but I have no idea how to explain it more clearly. I'm not\nasking for any favor or special treatment, I just ask to be treated\nlike a person.\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Mon, 09 Aug 2021 09:10:44 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Dear Hackers,\r\n\r\nI perfectly missed mails and 8/9 was a national holiday.\r\nI must apologize about anything. Very sorry for inconvenience.\r\n\r\n> The RMT's first responsibility is to ensure that PostgreSQL 14 is\r\n> released on schedule. We would prefer to avoid a revert, but we cannot\r\n> allow this to drag on indefinitely.\r\n\r\nOf cause I will try to avoid it but I can understand doing your business.\r\n\r\nDear Meskes,\r\n\r\nI summarize the thread.\r\nAs you might know DECLARE STATEMENT has been added from PG14, but I\r\nfound that connection-control feature cannot be used for DEALLOCATE\r\nand DESCRIBE statement (More details, please see patches or ask me).\r\nBut we have a question - what statement should use the associated\r\nconnection? Obviously DEALLOCATE statement should follow the linked\r\nconnection because the statement uses only one sql identifier. In\r\nDESCRIBE or any other descriptor-related statements, however, I think\r\nit is non-obvious because they have also descriptor-name. 
Currently I\r\nmade patches that includes about DESCRIBE, but I'm wondering this\r\napproach is correct. I want you to ask your opinion about the scope of\r\nDECLARE STATEMENT.\r\nCoding is not hard hence I think we can fix this until the end of Sep.\r\nif we set a policy correctly and have reviewers.\r\n\r\nBest Regards,\r\nHayato Kuroda\r\nFUJITSU LIMITED\r\n\r\n", "msg_date": "Mon, 9 Aug 2021 17:00:56 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Dear Wang,\n\nGood reporting!\n\n> I'm not sure, but how about modify the ecpg.trailer:\n> > statement: ecpgstart at toplevel_stmt ';' { connection = NULL; }\n> > | ecpgstart toplevel_stmt ';'\n> I think there are lots of 'connection = NULL;' in source, and we should find a\ngood location to reset the connection.\n\nThank you for giving a solution! I will consider the idea and\nintegrate it if it's OK.\n\nBest Regards,\nHayato Kuroda\nFUJITSU LIMITED\n\n\n\n", "msg_date": "Mon, 9 Aug 2021 17:01:36 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Dear Horiguchi-san,\n\n> How Pro*C behaves in that case? If the second command ends with an\n> error, I think we are free to error out the second command before\n> execution. If it works... do you know what is happening at the time?\n\nYou asked that how Oracle works when the followings precompiled and\nexecuted, don't it?\n> > > EXEC SQL AT conn1 DECLARE stmt STATEMENT;\n> > > EXEC SQL AT conn2 EXECUTE stmt INTO ..;\n\n While precompiling, it does not throw any errors. 
While executing,\nthe second statement will execute at conn1 without warnings.\nSo the added warning is postgres-extended.\n\nBest Regards,\nHayato Kuroda\nFUJITSU LIMITED\n\n\n\n", "msg_date": "Mon, 9 Aug 2021 17:01:57 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "\nDear Kuroda-san,\n\n> I perfectly missed mails and 8/9 was a national holiday.\n> I must apologize about anything. Very sorry for inconvenience.\n\nNo need to, non of it is your fault.\n\n> I summarize the thread.\n\nThank you so much, this is very, very helpful.\n\n> As you might know DECLARE STATEMENT has been added from PG14, but I\n> found that connection-control feature cannot be used for DEALLOCATE\n> and DESCRIBE statement (More details, please see patches or ask me).\n> But we have a question - what statement should use the associated\n> connection? Obviously DEALLOCATE statement should follow the linked\n> connection because the statement uses only one sql identifier. In\n> DESCRIBE or any other descriptor-related statements, however, I think\n> it is non-obvious because they have also descriptor-name. Currently I\n> made patches that includes about DESCRIBE, but I'm wondering this\n> approach is correct. I want you to ask your opinion about the scope\n> of\n> DECLARE STATEMENT.\n\nI've been reading through quite a few documents to come up with a good\nreason one way or the other, but as you already pointed out yourself,\nother database systems seem to not be consequent about the usage\neither. \n\nUnfortunately I didn't find my copy of the SQL standard. But then I\nkind of doubt it has much to say about this particular issue.\n\nAnyway, I'm currently leaning towards including DESCRIBE in the list of\nstatements that are influenced by DECLARE STATEMENT. 
My reason is that\nDESCRIBE can be issued with an AT clause already, regardless whether it\nmakes sense or not. Or in other words, if we allow it to get a\nconnection name one way, why should we forbid the other way. To me this\nseems to be more confusing than the current situation.\n\nThe alternative would be to forbid using an AT clause with DESCRIBE,\nwhich would definitely be a compatibility change, although, again, not\na functional one.\n\n> Coding is not hard hence I think we can fix this until the end of\n> Sep.\n> if we set a policy correctly and have reviewers.\n\nFully agreed. That's why a short glance at the patch didn't suffice to\nanswer this. I didn't see any issues with the patch so far. Please send\nit to me once its finished (or is it already?) and I'll give it a run,\ntoo.\n\nHopefully I caught up on all emails and didn't miss parts of the\nthread.\n\nThanks,\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Mon, 09 Aug 2021 20:21:00 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 9, 2021 at 12:10 AM Michael Meskes <meskes@postgresql.org> wrote:\n> How do you know I didn't spend 15 minutes looking at the patch and the\n> whole email thread? I surely did and it was more than 15 minutes, but\n> not enough to give a meaningful comment. If you can do it in 15\n> minutes, great for you, I cannot.\n\nThat was just an example of a token response. I don't know anything about ecpg.\n\n> Besides, I have not ignored the RMT. I don't know why you keep\n> insisting on something that is simply not true.\n\nMy email of July 30 was itself pretty strongly worded, but went\nunanswered for a full week. Not even a token response. If that doesn't\ncount as \"ignoring the RMT\", then what does? 
How much time has to pass\nbefore radio silence begins to count as \"ignoring the RMT\", in your\nview of things? A month? More?\n\n> At the risk of repeating myself, my concern is *only* the rude and\n> disrespectful way of addressing me in the third person while talking to\n> me directly. Again, I though I made that clear in my first email about\n> the topic and every one that followed.\n\nOkay, I understand that now.\n\n> I was *never* asked for *any* response in *any* email, save the\n> original technical discussion, which I did answer with telling people\n> that I'm lacking time but will eventually get to it. Just to be\n> precise, your email from July 30 told me and everyone how this will be\n> handled. A reasonable procedure, albeit not one we'd like to see\n> happen. But why should I answer and what? It's not that you bring this\n> up as a discussion point, but as a fact.\n\nAs Andrew pointed out, there is a general expectation that committers\ntake care of their own open items. That doesn't mean that they are\nobligated to personally fix bugs. Just that the final responsibility\nto make sure that the issue is resolved rests with the committer. This\nis one of the few hard rules that we have.\n\nAs I've said before, RMT-driven revert is something that I see as an\noption of last resort. The RMT charter doesn't go quite that far, but\nI'd argue that my interpretation is quite natural given how committer\nresponsibility works in general. In other words, I personally believe\nthat our bottom-up approach is on balance a good one, and should be\npreserved.\n\nMaybe the issue is muddied by the fact that we each have different\nviews of the community process, at a high level (I'm unsure). Unlike\nyou, I don't believe that RMT-driven revert is \"a reasonable\nprocedure\". I myself see the RMT's power to resolve open items in this\nway as a necessary evil. Something to be avoided at all costs. 
Why\nshould people that don't know anything about ecpg make decisions about\necpg? In general they should not.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Mon, 9 Aug 2021 11:30:02 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> My email of July 30 was itself pretty strongly worded, but went\n> unanswered for a full week. Not even a token response. If that\n> doesn't\n> count as \"ignoring the RMT\", then what does? How much time has to\n> pass\n> before radio silence begins to count as \"ignoring the RMT\", in your\n> view of things? A month? More?\n\nIf you want me to answer, how about asking a question? Or telling me\nthat you'd like some feedback? I don't see how I should know that you\nexpect a reply to a simple statement of facts.\n\n> Okay, I understand that now.\n\nAnd? Do you care at all?\n\n> As Andrew pointed out, there is a general expectation that committers\n> take care of their own open items. That doesn't mean that they are\n> obligated to personally fix bugs. Just that the final responsibility\n> to make sure that the issue is resolved rests with the committer.\n> This\n> is one of the few hard rules that we have.\n\nSure, I don't question that. Again, I knew about the issue, only\nmisjudged it in the beginning. Your email from July 30 did show me that\nit was more urgent but still didn't create the impression that there\nwas such a short deadline. In my opinion my prior email already\nexplained that I was on it, but couldn't give an estimate.\n\n> As I've said before, RMT-driven revert is something that I see as an\n> option of last resort. The RMT charter doesn't go quite that far, but\n> I'd argue that my interpretation is quite natural given how committer\n> responsibility works in general. 
In other words, I personally believe\n> that our bottom-up approach is on balance a good one, and should be\n> preserved.\n\nFair enough, to me a revert is a revert, no matter who issues it.\n\n> Maybe the issue is muddied by the fact that we each have different\n> views of the community process, at a high level (I'm unsure). Unlike\n> you, I don't believe that RMT-driven revert is \"a reasonable\n> procedure\". I myself see the RMT's power to resolve open items in\n> this\n> way as a necessary evil. Something to be avoided at all costs. Why\n> should people that don't know anything about ecpg make decisions\n> about\n> ecpg? In general they should not.\n\nWell, you did lay out what the decision would be and I fully agreed\nwith it. So again, what was there to do? Had you asked me if I agreed,\nI would told you. \n\nMichael\n\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Mon, 09 Aug 2021 20:45:17 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 9, 2021 at 11:45 AM Michael Meskes <meskes@postgresql.org> wrote:\n> If you want me to answer, how about asking a question? Or telling me\n> that you'd like some feedback? I don't see how I should know that you\n> expect a reply to a simple statement of facts.\n\nI expressed concern in fairly strong terms, and received no answer for\na full week. I consider that \"ignoring the RMT\", but you take issue\nwith that characterization because my email didn't ask an explicit\nquestion with a question mark. Despite the fact that it is generally\nunderstood that \"committers own their own items\", and that the RMT\nexists as a final check on that.\n\nClearly we disagree about this. I don't think that there is anything\nto be gained from discussing this any further, though. 
I suggest that\nwe leave it at that.\n\n> > Okay, I understand that now.\n>\n> And? Do you care at all?\n\nI don't want to upset anybody for any reason. I regret that my words\nhave upset you, but I think that they were misinterpreted in a way\nthat I couldn't possibly have predicted. The particular aspect of last\nFriday's email that you took exception to was actually intended to\nconvey that it was not personal. Remember, my whole ethos is to avoid\nstrong RMT intervention when possible, to make it impersonal. My\nframing of things had the opposite effect to the one I'd intended,\nironically.\n\n> Sure, I don't question that. Again, I knew about the issue, only\n> misjudged it in the beginning. Your email from July 30 did show me that\n> it was more urgent but still didn't create the impression that there\n> was such a short deadline. In my opinion my prior email already\n> explained that I was on it, but couldn't give an estimate.\n\nHow could anybody on the RMT judge what was going on sensibly? There\nwas *zero* information from you (the original committer, our point of\ncontact) about an item that is in a totally unfamiliar part of the\ncode to every other committer. We were effectively forced to make very\nconservative assumptions about the deadline. I think that it's very\nlikely that this could have been avoided if only you'd engaged to some\ndegree -- if you had said it was a short deadline then we'd likely\nhave taken your word for it, as the relevant subject matter expert and\ncommitter in charge of the item. But we were never given that choice.\n\n> Well, you did lay out what the decision would be and I fully agreed\n> with it. So again, what was there to do? Had you asked me if I agreed,\n> I would told you.\n\nIf the patch being reverted was so inconsequential to you that you\ndidn't even feel the need to write a brief email about it, why did you\ncommit it in the first place? 
I just don't understand this at all.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Mon, 9 Aug 2021 12:30:39 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Sat, Aug 7, 2021 at 4:13 AM Michael Meskes <meskes@postgresql.org> wrote:\n> I get it that the goal is to release PostgreSQL 14 and I also get it\n> that nobody is interested in the reasons for my slow reaction. I even,\n> at least to an extend, understand why nobody tried reaching out to me\n> directly. However, what I cannot understand at all is the tone of this\n> email. Is this the new way of communication in the PostgreSQL project?\n>\n> Just to be more precise, I find it highly offensive that you address an\n> email only to me (everyone else was on CC) and yet do not even try to\n> talk to me, but instead talk about me as a third person. I find this\n> very disrespectful.\n\nHi,\n\nFWIW, I don't think that the phrasing of Peter's email is\ndisrespectful. As I read it, it simply states that the RMT has made a\ndecision to revert the patch unless certain assurances are given\nbefore a certain date. I don't expect anyone will particularly like\nreceiving such an email, because nobody likes to be threatened with a\nrevert, but I don't think there is anything rude about it. Either you\nare willing to commit to resolving the problem by a date that the RMT\nfinds acceptable, or you aren't. If you are, great. If you aren't, the\npatch is going to get reverted. That sucks, but it's nothing against\nyou personally; it's just what happens sometimes. I also feel rather\nstrongly that being a member of the RMT is a pretty thankless task,\ninvolving going through a lot of patches that you may not care about\nand trying to make decisions that will benefit the project, even while\nknowing that some people may not like them. 
We should give people who\nare willing to offer such service the benefit of the doubt.\n\nOn the substance of the issue, one question that I have reading over\nthis thread is whether there's really a bug here at all. It is being\nrepresented (and I have not checked whether this is accurate) that the\npatch affects the behavior of DECLARE, PREPARE, and EXECUTE, but not\nDESCRIBE, DEALLOCATE, DECLARE CURSOR .. FOR, or CREATE TABLE AS\nEXECUTE. However, it also seems that it's not entirely clear what the\nbehavior ought to be in those cases, except perhaps for DESCRIBE. If\nthat's the case, maybe this doesn't really need to be an open item,\nand maybe we don't therefore need to talk about whether it should be\nreverted. Maybe it's just a feature that supports certain things now\nand in the future, after due reflection, perhaps it will support more.\n\nI would be interested in hearing your view, and that of others, on\nwhether this is really a bug at all.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 9 Aug 2021 16:28:25 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> question with a question mark. Despite the fact that it is generally\n> understood that \"committers own their own items\", and that the RMT\n> exists as a final check on that.\n\nThis does not contradict my opinion, but anyway. \n\n> Clearly we disagree about this. I don't think that there is anything\n> to be gained from discussing this any further, though. I suggest that\n> we leave it at that.\n\nAgreed.\n\n> I don't want to upset anybody for any reason. I regret that my words\n> have upset you, but I think that they were misinterpreted in a way\n> that I couldn't possibly have predicted. The particular aspect of\n\nI strongly object to that. 
It's pretty obvious to me that addressing\npeople in third person is very offending.\n\n> last\n> Friday's email that you took exception to was actually intended to\n> convey that it was not personal. Remember, my whole ethos is to avoid\n> strong RMT intervention when possible, to make it impersonal. My\n> framing of things had the opposite effect to the one I'd intended,\n> ironically.\n\nLet me stress again that the third person part is the bad thing in my\nopinion, not the rest of the words.\n \n> How could anybody on the RMT judge what was going on sensibly? There\n> was *zero* information from you (the original committer, our point of\n> contact) about an item that is in a totally unfamiliar part of the\n> code to every other committer. We were effectively forced to make\n> very\n> conservative assumptions about the deadline. I think that it's very\n> likely that this could have been avoided if only you'd engaged to\n> some\n> degree -- if you had said it was a short deadline then we'd likely\n> have taken your word for it, as the relevant subject matter expert\n> and\n> committer in charge of the item. But we were never given that choice.\n\nThe same holds the other way round, I only understood later that you\nwanted more information. Had I known that earlier, I would have gladly\ngiven them. \n\n> > Well, you did lay out what the decision would be and I fully agreed\n> > with it. So again, what was there to do? Had you asked me if I\n> > agreed,\n> > I would told you.\n> \n> If the patch being reverted was so inconsequential to you that you\n> didn't even feel the need to write a brief email about it, why did\n> you\n> commit it in the first place? I just don't understand this at all.\n\nI'm getting very tired of you accusing me of things I neither said nor\ndid. Please stop doing that or show me the email where I said the patch\nwas \"inconsequential\"? 
As for writing a brief email, please read all\nthe other emails in this thread, I've explained myself repeatedly\nalready.\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Mon, 09 Aug 2021 22:38:07 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Hi,\n\n> FWIW, I don't think that the phrasing of Peter's email is\n> disrespectful. As I read it, it simply states that the RMT has made a\n\nAs I said before, it might be a cultural difference. What I don't\nunderstand is, that a simple \"Hi Michael, this is what the RMT\ndecided:\" would have been sufficient to make this email okay. I take\noffense in being addressed in third person only.\n\n> strongly that being a member of the RMT is a pretty thankless task,\n\nThat I agree with.\n\n> On the substance of the issue, one question that I have reading over\n> this thread is whether there's really a bug here at all. It is being\n> represented (and I have not checked whether this is accurate) that\n> the\n> patch affects the behavior of  DECLARE, PREPARE, and EXECUTE, but not\n> DESCRIBE, DEALLOCATE, DECLARE CURSOR .. FOR, or CREATE TABLE AS\n> EXECUTE. However, it also seems that it's not entirely clear what the\n> behavior ought to be in those cases, except perhaps for DESCRIBE. If\n> that's the case, maybe this doesn't really need to be an open item,\n> and maybe we don't therefore need to talk about whether it should be\n> reverted. Maybe it's just a feature that supports certain things now\n> and in the future, after due reflection, perhaps it will support\n> more.\n\nThe way I see it we should commit the patch that makes more statements\nhonor the new DECLARE STATEMENT feature. I don't think we can change\nanything for the worse by doing that. 
And other databases are not\nreally strict about this either.\n\n> I would be interested in hearing your view, and that of others, on\n> whether this is really a bug at all.\n\nI think the question is more which version of the patch we commit as it\ndoes increase the functionality of ecpg.\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Mon, 09 Aug 2021 22:50:29 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 9, 2021 at 1:38 PM Michael Meskes <meskes@postgresql.org> wrote:\n\n> > I don't want to upset anybody for any reason. I regret that my words\n> > have upset you, but I think that they were misinterpreted in a way\n> > that I couldn't possibly have predicted. The particular aspect of\n>\n> I strongly object to that. It's pretty obvious to me that addressing\n> people in third person is very offending.\n>\n\nAnd using the third person to avoid making things personal, and properly\nrepresent one's role as a representative as opposed to an individual, is\nsomething at least two of us consider to be \"professional\". If others\ntaking on a professional/formal tone with you is offending I politely\nsuggest you need to at least cut them some slack when you haven't informed\nthem of this previously. Cultural differences happen in both directions.\n\n> How could anybody on the RMT judge what was going on sensibly? There\n> > was *zero* information from you (the original committer, our point of\n> > contact) about an item that is in a totally unfamiliar part of the\n> > code to every other committer. We were effectively forced to make\n> > very\n> > conservative assumptions about the deadline. 
I think that it's very\n> > likely that this could have been avoided if only you'd engaged to\n> > some\n> > degree -- if you had said it was a short deadline then we'd likely\n> > have taken your word for it, as the relevant subject matter expert\n> > and\n> > committer in charge of the item. But we were never given that choice.\n>\n> The same holds the other way round, I only understood later that you\n> wanted more information. Had I known that earlier, I would have gladly\n> given them.\n\n\nThere is clearly an expectation from the RMT, and at least myself, that:\n\n\"The RMT discussed this recently. We are concerned about this issue,\nincluding how it has been handled so far.\n\nIf you're unable to resolve the issue (or at least commit time to\nresolving the issue) then the RMT will call for the revert of the\noriginal feature patch.\"\n\nis expected to elicit a response from the comitter in a timely fashion.\nReally, any email sent to -hackers from the RMT about a specific commit; or\neven any email sent to -hackers by a core team member, is expected to be\nresponded to in a timely manner. These teams should not be getting\ninvolved with the day-to-day operations and being responsive to them is\npart of the obligation of being a committer.\n\nIn hindsight probably the quoted email above should have been worded.\n\n\"If you're unable to resolve the issue, or communicate a timely plan for\ndoing so, within the next week please revert the patch.\"\n\nMaking it clear that the committer should be the one performing the\nrevert. Then, absent feedback or a revert, the second email and the RMT\nteam performing the revert, would be appropriate.\n\nDavid J.\n\nOn Mon, Aug 9, 2021 at 1:38 PM Michael Meskes <meskes@postgresql.org> wrote:> I don't want to upset anybody for any reason. I regret that my words\n> have upset you, but I think that they were misinterpreted in a way\n> that I couldn't possibly have predicted. 
The particular aspect of\n\nI strongly object to that. It's pretty obvious to me that addressing\npeople in third person is very offending.And using the third person to avoid making things personal, and properly represent one's role as a representative as opposed to an individual, is something at least two of us consider to be \"professional\".  If others taking on a professional/formal tone with you is offending I politely suggest you need to at least cut them some slack when you haven't informed them of this previously.  Cultural differences happen in both directions.\n> How could anybody on the RMT judge what was going on sensibly? There\n> was *zero* information from you (the original committer, our point of\n> contact) about an item that is in a totally unfamiliar part of the\n> code to every other committer. We were effectively forced to make\n> very\n> conservative assumptions about the deadline. I think that it's very\n> likely that this could have been avoided if only you'd engaged to\n> some\n> degree -- if you had said it was a short deadline then we'd likely\n> have taken your word for it, as the relevant subject matter expert\n> and\n> committer in charge of the item. But we were never given that choice.\n\nThe same holds the other way round, I only understood later that you\nwanted more information. Had I known that earlier, I would have gladly\ngiven them.There is clearly an expectation from the RMT, and at least myself, that:\"The RMT discussed this recently. We are concerned about this issue,including how it has been handled so far.If you're unable to resolve the issue (or at least commit time toresolving the issue) then the RMT will call for the revert of theoriginal feature patch.\"is expected to elicit a response from the comitter in a timely fashion.  Really, any email sent to -hackers from the RMT about a specific commit; or even any email sent to -hackers by a core team member, is expected to be responded to in a timely manner.  
These teams should not be getting involved with the day-to-day operations and being responsive to them is part of the obligation of being a committer.In hindsight probably the quoted email above should have been worded.\"If you're unable to resolve the issue, or communicate a timely plan for doing so, within the next week please revert the patch.\"Making it clear that the committer should be the one performing the revert.  Then, absent feedback or a revert, the second email and the RMT team performing the revert, would be appropriate.David J.", "msg_date": "Mon, 9 Aug 2021 13:55:51 -0700", "msg_from": "\"David G. Johnston\" <david.g.johnston@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 9, 2021 at 10:38:07PM +0200, Michael Meskes wrote:\n> > Clearly we disagree about this. I don't think that there is anything\n> > to be gained from discussing this any further, though. I suggest that\n> > we leave it at that.\n> \n> Agreed.\n> \n> > I don't want to upset anybody for any reason. I regret that my words\n> > have upset you, but I think that they were misinterpreted in a way\n> > that I couldn't possibly have predicted. The particular aspect of\n> \n> I strongly object to that. It's pretty obvious to me that addressing\n> people in third person is very offending.\n\nSo, you object to him referring to you in the third person in an email,\nand you object to him saying it was \"misinterpreted\". Are you going to\nobject to my email too?\n\nI think everyone can accept that you interpreted what Peter said as\noffensive, but you must also give the same acceptance that someone might\nnot consider it offensive. For example, I did not read it as offensive\nat all.\n\nI think it might have been in the third person because at that point,\nPeter didn't expect a reply from you, and put you on the \"TO\" line\nmerely as a courtesy. 
He could have put out an email about reverting\nthe patch without you on the email header at all, I guess --- then he\ncould have referred to you without offending you.\n\n> > How could anybody on the RMT judge what was going on sensibly? There\n> > was *zero* information from you (the original committer, our point of\n> > contact) about an item that is in a totally unfamiliar part of the\n> > code to every other committer. We were effectively forced to make\n> > very\n> > conservative assumptions about the deadline. I think that it's very\n> > likely that this could have been avoided if only you'd engaged to\n> > some\n> > degree -- if you had said it was a short deadline then we'd likely\n> > have taken your word for it, as the relevant subject matter expert\n> > and\n> > committer in charge of the item. But we were never given that choice.\n> \n> The same holds the other way round, I only understood later that you\n> wanted more information. Had I known that earlier, I would have gladly\n> given them. \n\nLet me be practical here --- the more someone has to be chased for a\nreply, the less confidence they have in that person. If the RMT\ncontacts you about something, and obviously they have had to take usual\nefforts to contact you, the more it is on you to give a full report and\na timeline of when you will address the issue. If they had to chase you\naround, and you gave them a short answer, the less confidence they have\nin this getting resolved in a timely manner.\n\nIt is the RMT's responsibility to resolve things in a timely manner, and\nsince they have contacted you, you should be going out of your way to at\nleast give them confidence that it will be dealt with by you, rather\nthan them. 
Whether the problem is them not asking for a timeline or you\nnot offering one, the real solution would have been to provide a\ntimeline to them when they contacted you, since if the RMT is contacting\nyou, it is a serious issue.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Mon, 9 Aug 2021 17:04:50 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 9, 2021 at 1:38 PM Michael Meskes <meskes@postgresql.org> wrote:\n> > I don't want to upset anybody for any reason. I regret that my words\n> > have upset you, but I think that they were misinterpreted in a way\n> > that I couldn't possibly have predicted. The particular aspect of\n>\n> I strongly object to that. It's pretty obvious to me that addressing\n> people in third person is very offending.\n\nI think that this must be a cultural thing. I can see how somebody\nwould see the third person style as overly formal or stilted. An\ninterpretation like that does make sense to me. But I knew of no\nreason why you might find that style made the message offensive. It\nwas never intended to denigrate.\n\nI don't know you all that well, but we have talked for quite a while\non a few occasions. I got the sense that you are a decent person from\nthese conversations. I have no possible reason to denigrate or insult\nyou. In general I try to be respectful, and if I ever fail it's not\nbecause I didn't care at all. 
Anybody that knows me well knows that I\nam not a mean spirited person.\n\nIf this is just an unfortunate misunderstanding, as I suspect it is,\nthen I would be happy to let it go, and treat it as something to learn\nfrom.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Mon, 9 Aug 2021 14:05:32 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> > > I don't want to upset anybody for any reason. I regret that my\n> > > words\n> > > have upset you, but I think that they were misinterpreted in a\n> > > way\n> > > that I couldn't possibly have predicted. The particular aspect of\n> > \n> > I strongly object to that. It's pretty obvious to me that\n> > addressing\n> > people in third person is very offending.\n> \n> So, you object to him referring to you in the third person in an\n> email,\n> and you object to him saying it was \"misinterpreted\".  Are you going\n> to\n> object to my email too?\n\nNo, of course not. And sorry for not being precise enough, I only\nobjected to the prediction part, but I agree, I take the objection\nback. I guess it's as difficult for Peter to understand why this is\noffensive as it is for me to not see it as such.\n\n> I think it might have been in the third person because at that point,\n> Peter didn't expect a reply from you, and put you on the \"TO\" line\n> merely as a courtesy.  He could have put out an email about reverting\n> the patch without you on the email header at all, I guess --- then he\n> could have referred to you without offending you.\n\nRight, that was my only problem originally. It seemed difficult to\nbring that point over.\n\n> Let me be practical here --- the more someone has to be chased for a\n> reply, the less confidence they have in that person.  
If the RMT\n> contacts you about something, and obviously they have had to take\n> usual\n> efforts to contact you, the more it is on you to give a full report\n> and\n> a timeline of when you will address the issue.  If they had to chase\n> you\n> around, and you gave them a short answer, the less confidence they\n> have\n> in this getting resolved in a timely manner.\n\nAgain agreed, please keep in mind, though, that I didn't notice I was\nbeing chased until Peter's first email.\n\nMichael\n\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Mon, 09 Aug 2021 23:48:07 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 9, 2021 at 11:48:07PM +0200, Michael Meskes wrote:\n> No, of course not. And sorry for not being precise enough, I only\n> objected to the prediction part, but I agree, I take the objection\n> back. I guess it's as difficult for Peter to understand why this is\n> offensive as it is for me to not see it as such.\n\nOK, good.\n\n> > Let me be practical here --- the more someone has to be chased for a\n> > reply, the less confidence they have in that person.  If the RMT\n> > contacts you about something, and obviously they have had to take\n> > usual\n> > efforts to contact you, the more it is on you to give a full report\n> > and\n> > a timeline of when you will address the issue.  If they had to chase\n> > you\n> > around, and you gave them a short answer, the less confidence they\n> > have\n> > in this getting resolved in a timely manner.\n> \n> Again agreed, please keep in mind, though, that I didn't notice I was\n> being chased until Peter's first email.\n\nI was asked by the RMT to contact one of your employees, and I did, to\ntell you that the RMT was looking for feedback from you on an ecpg\nissue. 
Not sure if that was before or after Peter's email.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Mon, 9 Aug 2021 18:00:00 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Peter,\n\n> I think that this must be a cultural thing. I can see how somebody\n> would see the third person style as overly formal or stilted. An\n> interpretation like that does make sense to me. But I knew of no\n> reason why you might find that style made the message offensive. It\n> was never intended to denigrate.\n\nThis explains why it felt so difficult to make myself clear. I was\nfeeling exactly the same, just the other way round.\n\n> I don't know you all that well, but we have talked for quite a while\n> on a few occasions. I got the sense that you are a decent person from\n> these conversations. I have no possible reason to denigrate or insult\n> you. In general I try to be respectful, and if I ever fail it's not\n> because I didn't care at all. Anybody that knows me well knows that I\n> am not a mean spirited person.\n\nI never though that. Maybe we should have quickly talked things out.\nEmail tends to be a bad medium for communication, especially when it\ngoes wrong. :)\n\n> If this is just an unfortunate misunderstanding, as I suspect it is,\n> then I would be happy to let it go, and treat it as something to\n> learn\n> from.\n\nAgreed, me too. \n\nI'd like to apologize for not answering your email the way I should\nhave. Honestly it never occurred to me. 
Maybe that's because I'm used\nto other procedures, or because I never had to converse with the RMT,\nor maybe, quite simple, because I lacked the time to think it through,\nthe original issue that kind of started this whole mess.\n\nMichael\n\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Tue, 10 Aug 2021 00:03:24 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 9, 2021 at 06:00:00PM -0400, Bruce Momjian wrote:\n> > Again agreed, please keep in mind, though, that I didn't notice I was\n> > being chased until Peter's first email.\n> \n> I was asked by the RMT to contact one of your employees, and I did, to\n> tell you that the RMT was looking for feedback from you on an ecpg\n> issue. Not sure if that was before or after Peter's email.\n\nThe date of that request was July 28 and I was told by your employee\nthat you would be informed that afternoon. If you want the employee's\nname, I will provide it in a private email.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Mon, 9 Aug 2021 18:04:02 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Tue, Aug 10, 2021 at 12:03:24AM +0200, Michael Meskes wrote:\n> I'd like to apologize for not answering your email the way I should\n> have. Honestly it never occurred to me. Maybe that's because I'm used\n> to other procedures, or because I never had to converse with the RMT,\n> or maybe, quite simple, because I lacked the time to think it through,\n> the original issue that kind of started this whole mess.\n\nAgreed. 
When the RMT contacts me, I assume it is something that is time\nand release critical so I give them as much detail as I can, and a\ntimeline when they will get more information. If you are not focused on\nthe RMT process, it might not be clear why that is important.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Mon, 9 Aug 2021 18:15:10 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> > Again agreed, please keep in mind, though, that I didn't notice I\n> > was\n> > being chased until Peter's first email.\n> \n> I was asked by the RMT to contact one of your employees, and I did,\n> to\n> tell you that the RMT was looking for feedback from you on an ecpg\n> issue.  Not sure if that was before or after Peter's email.\n\nI think that was before, at that point I still thought it was nothing\ntime sensitive. And unfortunately it didn't register that RMT was\ninvolved at all.\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Tue, 10 Aug 2021 00:21:16 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "\nOn 8/9/21 6:15 PM, Bruce Momjian wrote:\n> On Tue, Aug 10, 2021 at 12:03:24AM +0200, Michael Meskes wrote:\n>> I'd like to apologize for not answering your email the way I should\n>> have. Honestly it never occurred to me. Maybe that's because I'm used\n>> to other procedures, or because I never had to converse with the RMT,\n>> or maybe, quite simple, because I lacked the time to think it through,\n>> the original issue that kind of started this whole mess.\n> Agreed. 
When the RMT contacts me, I assume it is something that is time\n> and release critical so I give them as much detail as I can, and a\n> timeline when they will get more information. If you are not focused on\n> the RMT process, it might not be clear why that is important.\n>\n\nThat's what you should be doing. If nothing else comes from this\ncolloquy it should make all committers aware of the process. The reason\nwe have an RMT, as I understand it, is to prevent the situation we had\nyears ago when things sometimes dragged on almost interminably after\nfeature freeze.\n\n\ncheers\n\n\nandrew\n\n\n\n", "msg_date": "Mon, 9 Aug 2021 18:25:56 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Michael,\n\nOn Mon, Aug 9, 2021 at 3:03 PM Michael Meskes <meskes@postgresql.org> wrote:\n> This explains why it felt so difficult to make myself clear. I was\n> feeling exactly the same, just the other way round.\n\nMy own special brand of miscommunication was also involved. I happen\nto be sensitive to the perception that I yield any authority that I\nmight have (as an RMT member, as a committer, whatever) in a way that\nis arbitrary or unfair. And so I wrote way too much about why that\nwasn't actually the case here. I now realize that that wasn't really\nrelevant.\n\n> I never though that. Maybe we should have quickly talked things out.\n> Email tends to be a bad medium for communication, especially when it\n> goes wrong. :)\n\nIndeed. That might well have happened if we had been set up for it already.\n\n> I'd like to apologize for not answering your email the way I should\n> have. Honestly it never occurred to me. 
Maybe that's because I'm used\n> to other procedures, or because I never had to converse with the RMT,\n> or maybe, quite simple, because I lacked the time to think it through,\n> the original issue that kind of started this whole mess.\n\nI think that there was a snowballing effect here. We both made\nmistakes that compounded. I apologize for my role in this whole mess.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Mon, 9 Aug 2021 15:42:45 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 9, 2021 at 03:42:45PM -0700, Peter Geoghegan wrote:\n> > I'd like to apologize for not answering your email the way I should\n> > have. Honestly it never occurred to me. Maybe that's because I'm used\n> > to other procedures, or because I never had to converse with the RMT,\n> > or maybe, quite simple, because I lacked the time to think it through,\n> > the original issue that kind of started this whole mess.\n> \n> I think that there was a snowballing effect here. We both made\n> mistakes that compounded. I apologize for my role in this whole mess.\n\nI do think all committers need to understand the role of the RMT so they\ncan respond appropriately. Do we need to communicate this better?\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Mon, 9 Aug 2021 18:51:36 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 9, 2021 at 3:51 PM Bruce Momjian <bruce@momjian.us> wrote:\n> > I think that there was a snowballing effect here. We both made\n> > mistakes that compounded. 
I apologize for my role in this whole mess.\n>\n> I do think all committers need to understand the role of the RMT so they\n> can respond appropriately. Do we need to communicate this better?\n\nI think that it makes sense to codify the practical expectations that\nthe community has of existing committers, at least to some degree. I\nmean why wouldn't we? The resulting document (an addition to the\n\"committers\" Postgres wiki page?) would inevitably leave certain\nquestions open to interpretation. That seems okay to me.\n\nI don't feel qualified to write this myself. Just my opinion.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Mon, 9 Aug 2021 16:20:05 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 09, 2021 at 10:50:29PM +0200, Michael Meskes wrote:\n>> On the substance of the issue, one question that I have reading over\n>> this thread is whether there's really a bug here at all. It is being\n>> represented (and I have not checked whether this is accurate) that\n>> the\n>> patch affects the behavior of  DECLARE, PREPARE, and EXECUTE, but not\n>> DESCRIBE, DEALLOCATE, DECLARE CURSOR .. FOR, or CREATE TABLE AS\n>> EXECUTE. However, it also seems that it's not entirely clear what the\n>> behavior ought to be in those cases, except perhaps for DESCRIBE. If\n>> that's the case, maybe this doesn't really need to be an open item,\n>> and maybe we don't therefore need to talk about whether it should be\n>> reverted. Maybe it's just a feature that supports certain things now\n>> and in the future, after due reflection, perhaps it will support\n>> more.\n> \n> The way I see it we should commit the patch that makes more statements\n> honor the new DECLARE STATEMENT feature. I don't think we can change\n> anything for the worse by doing that. And other databases are not\n> really strict about this either.\n\nOkay. 
So you mean to change DESCRIBE and DEALLOCATE to be able to\nhandle cached connection names, as of [1]? For [DE]ALLOCATE, I agree\nthat it could be a good thing. declare.pgc seems to rely on that\nalready but the tests are incorrect as I mentioned in [2]. For\nDESCRIBE, that provides data about a result set, I find the assignment\nof a connection a bit strange, and even if this would allow the use of\nthe same statement name for multiple connections, it seems to me that \nthere is a risk of breaking existing applications. There should not\nbe that many, so perhaps that's fine anyway.\n\n[1]: https://www.postgresql.org/message-id/TYAPR01MB5866973462D17F2AEBD8EBB8F51F9@TYAPR01MB5866.jpnprd01.prod.outlook.com\n[2]: https://www.postgresql.org/message-id/YOQNCyfxp868zZUV@paquier.xyz\n--\nMichael", "msg_date": "Tue, 10 Aug 2021 15:20:50 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "\n\nPeter,\n\n> I think that there was a snowballing effect here. We both made\n> mistakes that compounded. I apologize for my role in this whole mess.\n\nCompletely agreed. I think we both took things for granted that the\nother one didn't take into account at all. I'm sorry about that, too.\n\nMichael\n\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Tue, 10 Aug 2021 09:26:31 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> Okay.  So you mean to change DESCRIBE and DEALLOCATE to be able to\n> handle cached connection names, as of [1]?  For [DE]ALLOCATE, I agree\n\nYes, at least technically. I think DESCRIBE should accept the cached\nconnection name, although it won't really matter.\n\n> that it could be a good thing.  
declare.pgc seems to rely on that\n> already but the tests are incorrect as I mentioned in [2].  For\n> DESCRIBE, that provides data about a result set, I find the\n> assignment\n> of a connection a bit strange, and even if this would allow the use\n> of\n> the same statement name for multiple connections, it seems to me that\n> there is a risk of breaking existing applications.  There should not\n> be that many, so perhaps that's fine anyway.\n\nI don't think we'd break anything given that DECLARE STATEMENT is new.\nAlso please keep in mind that you can use EXEC SQL AT ... DESCRIBE ...;\nalready anyway. Again, not very meaningful but why should we accept a\nconnection one way but not the other?\n\nMichael\n\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org", "msg_date": "Tue, 10 Aug 2021 09:31:37 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> I do think all committers need to understand the role of the RMT so\n> they\n> can respond appropriately.  Do we need to communicate this better?\n\nBeing the one who assumed a different procedure, yes please. :)\n\nThanks,\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Tue, 10 Aug 2021 09:37:19 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Dear Meskes and any hackers,\r\n\r\n> Yes, at least technically. 
I think DESCRIBE should accept the cached\r\n> connection name, although it won't really matter.\r\n\r\nI sought docs too and I found that Pro*C have such a same policy,\r\nso it might be reasonable:\r\n\r\nhttps://docs.oracle.com/en/database/oracle/oracle-database/21/lnpcc/Oracle-dynamic-SQL.html#GUID-0EB50EB7-D4C8-401D-AFCD-340D281711C4\r\n\r\n\r\n\r\nAnyway I revised patches again in the current spec. I separated them into 6 parts:\r\n\r\n0001: move \"connection = NULL\" to top rule. This is per Wang.\r\n0002: adds supporting deallocate statement.\r\n0003: adds supporting describe statement. The above and this are main parts.\r\n0004: adds warning then the connection is overwritten. This is per Horiguchi-san.\r\n0005: adds warning then the connection is overwritten. This is per Horiguchi-san and Paquier.\r\n0006: adds some tests.\r\n\r\n0001 is the solution of follows:\r\nhttps://www.postgresql.org/message-id/OSBPR01MB42141999ED8EFDD4D8FDA42CF2E29%40OSBPR01MB4214.jpnprd01.prod.outlook.com\r\n\r\nThis bug is caused because \"connection = NULL\" is missing is missing in some cases, so I force to\r\nsubstitute NULL in the statement: rule, the top-level in the parse tree.\r\nI also remove the substitution from output.c because such line is overlapped.\r\nIf you think this change is too large, I can erase 0001 and add a substitution to the end part of\r\nECPGCursorStmt rule. 
That approach is also resolve the bug and impact is very small.\r\n\r\n0004 is an optional patch, this is not related with DEALLOCATE and DESCRIBE.\r\nWe were discussing about how should work when followings are pre-compiled and executed:\r\n\r\n> EXEC SQL AT conn1 DECLARE stmt STATEMENT;\r\n> EXEC SQL AT conn2 EXECUTE stmt INTO ..;\r\n\r\nCurrently line 2 will execute at conn1 without any warnings (and this is the Oracle's spec) but Horiguchi-san says it is non-obvious.\r\nSo I added a precompiler-warning when the above situation.\r\nMore discussion might be needed here, but this is not main part.\r\n\r\nAbout 0005, see previous discussion:\r\n\r\n> Since we don't allow descriptors with the same name even if they are\r\n> for the different connections, I think we can set the current\r\n> connection if any (which is set either by AT option or statement-bound\r\n> one) to i->connection silently if i->connection is NULL in\r\n> lookup_descriptor(). What do you think about this?\r\n\r\nHow do you think?\r\n\r\nBest Regards,\r\nHayato Kuroda\r\nFUJITSU LIMITED", "msg_date": "Tue, 10 Aug 2021 09:27:46 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Tue, Aug 10, 2021 at 09:37:19AM +0200, Michael Meskes wrote:\n> > I do think all committers need to understand the role of the RMT so\n> > they\n> > can respond appropriately.  Do we need to communicate this better?\n> \n> Being the one who assumed a different procedure, yes please. :)\n\nI think my point was that committers should be required to understand\nthe RMT process, and if we need to communicate that better, let's do\nthat. 
I don't think it should be the responsibility of RMT members to\ncommunicate the RMT process every time they communicate with someone,\nunless someone asks.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Tue, 10 Aug 2021 13:18:23 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> I think my point was that committers should be required to understand\n> the RMT process, and if we need to communicate that better, let's do\n> that.  I don't think it should be the responsibility of RMT members\n> to\n> communicate the RMT process every time they communicate with someone,\n> unless someone asks.\n\nCompletely agreed, my point was that documenting the process to some\nextend would be helpful. For obvious reasons I'm the wrong person to do\nthat, though. :)\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Tue, 10 Aug 2021 20:05:29 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Tue, Aug 10, 2021 at 08:05:29PM +0200, Michael Meskes wrote:\n> > I think my point was that committers should be required to understand\n> > the RMT process, and if we need to communicate that better, let's do\n> > that.  I don't think it should be the responsibility of RMT members\n> > to\n> > communicate the RMT process every time they communicate with someone,\n> > unless someone asks.\n> \n> Completely agreed, my point was that documenting the process to some\n> extend would be helpful. For obvious reasons I'm the wrong person to do\n> that, though. :)\n\nAgreed. 
How is this, which already exists?\n\n\thttps://wiki.postgresql.org/wiki/Release_Management_Team\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Tue, 10 Aug 2021 14:07:00 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> Agreed.  How is this, which already exists?\n> \n>         https://wiki.postgresql.org/wiki/Release_Management_Team\n\nThat I know, but I don't think it covers the issues we, or I, had up-\nthread. Or do I miss something?\n\nSpeaking of RMT, Andrew, Michael, Peter, will you make the final\ndecision whether we commit Kuroda-san's patches?\n\nThey are fine by me. Another pair of eyes would be nice, though. maybe\nyou could have another look, Horiguchi-san?\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Tue, 10 Aug 2021 20:16:49 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "\nOn 8/10/21 2:16 PM, Michael Meskes wrote:\n>> Agreed.  How is this, which already exists?\n>>\n>>         https://wiki.postgresql.org/wiki/Release_Management_Team\n> That I know, but I don't think it covers the issues we, or I, had up-\n> thread. Or do I miss something?\n\n\nNo, you're right, although I think it's implied. Maybe we need a\nstatement along these lines:\n\n\nCommitters are responsible for the resolution of open items that relate\nto commits they have made. Action needs to be taken in a timely fashion,\nand if there is any substantial delay in dealing with an item the\ncommitter should provide a date by which they expect action to be\ncompleted. 
The RMT will follow up where these requirements are not being\ncomplied with.\n\n\n\n>\n> Speaking of RMT, Andrew, Michael, Peter, will you make the final\n> decision whether we commit Kuroda-san's patches?\n>\n> They are fine by me. Another pair of eyes would be nice, though. maybe\n> you could have another look, Horiguchi-san?\n>\n\n\nIf they are fine by you then I accept that. After all, the reason we\nwant you to deal with this is not only that you made the original commit\nbut because you're the expert in this area.\n\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Tue, 10 Aug 2021 16:46:18 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Tue, Aug 10, 2021 at 1:46 PM Andrew Dunstan <andrew@dunslane.net> wrote:\n> No, you're right, although I think it's implied. Maybe we need a\n> statement along these lines:\n\nI agree with that, but to me it's more in the scope of what is\nexpected of committers in general. At a very high level. So it's not\nsomething that I'd expect to see on the RMT Postgres Wiki page. I\nwould expect to see it on the committers Wiki page, somewhere like\nthat.\n\n> If they are fine by you then I accept that. After all, the reason we\n> want you to deal with this is not only that you made the original commit\n> but because you're the expert in this area.\n\n+1.\n\nNobody questioned the original commit, so it would be premature (if\nnot totally arbitrary) to change our approach now, at the first sign\nof trouble. To the best of my knowledge there is no special risk with\napplying this patch to address the behavioral inconsistencies, nor is\nthere any known special risk with any other fix. 
Including even\ndeciding to *not* fix the inconsistency in Postgres 14 based on\npractical considerations -- for all I know Michael might be perfectly\njustified in interpreting the patch as new feature work that's out of\nscope now.\n\nI don't feel qualified to even offer an opinion.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Tue, 10 Aug 2021 14:16:57 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Tue, Aug 10, 2021 at 09:31:37AM +0200, Michael Meskes wrote:\n>> that it could be a good thing.  declare.pgc seems to rely on that\n>> already but the tests are incorrect as I mentioned in [2].  For\n>> DESCRIBE, that provides data about a result set, I find the\n>> assignment\n>> of a connection a bit strange, and even if this would allow the use\n>> of\n>> the same statement name for multiple connections, it seems to me that\n>> there is a risk of breaking existing applications.  There should not\n>> be that many, so perhaps that's fine anyway.\n> \n> I don't think we'd break anything given that DECLARE STATEMENT is new.\n\nSure, DECLARE does not matter as it is new. However, please note that\nthe specific point I was trying to make with my link [2] from upthread\nis related to the use of cached connection names with DEALLOCATE, as\nof this line in the new test declare.pgc:\n EXEC SQL DEALLOCATE PREPARE stmt_2;\n\nAnd DEALLOCATE is far from being new.\n\n> Also please keep in mind that you can use EXEC SQL AT ... DESCRIBE ...;\n> already anyway. 
Again, not very meaningful but why should we accept a\n> connection one way but not the other?\n\nNo objections to that.\n--\nMichael", "msg_date": "Wed, 11 Aug 2021 09:29:23 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> Sure, DECLARE does not matter as it is new.  However, please note\n> that\n> the specific point I was trying to make with my link [2] from\n> upthread\n> is related to the use of cached connection names with DEALLOCATE, as\n> of this line in the new test declare.pgc:\n>     EXEC SQL DEALLOCATE PREPARE stmt_2;\n> \n> And DEALLOCATE is far from being new.\n\nI'm not sure I understand. Any usage of DECLARE STATEMENT makes the\nfile need the current version of ecpg anyway. On the other hand\nDEALLOCATE did not change its behavior if no DECLARE STATEMENT was\nissued, or what did I miss?\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org", "msg_date": "Wed, 11 Aug 2021 22:41:59 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> No, you're right, although I think it's implied. Maybe we need a\n> statement along these lines:\n> \n> \n> Committers are responsible for the resolution of open items that\n> relate\n> to commits they have made. Action needs to be taken in a timely\n> fashion,\n> and if there is any substantial delay in dealing with an item the\n> committer should provide a date by which they expect action to be\n> completed. The RMT will follow up where these requirements are not\n> being\n> complied with.\n\nI think that would be helpful, yes.\n\n> If they are fine by you then I accept that. 
After all, the reason we\n> want you to deal with this is not only that you made the original\n> commit\n> but because you're the expert in this area.\n\nI will commit the patch(es). Thanks.\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org\n\n\n\n", "msg_date": "Thu, 12 Aug 2021 19:21:01 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Michael Meskes <meskes@postgresql.org> writes:\n> I will commit the patch(es). Thanks.\n\nThis commit appears to be responsible for new noise on stderr\nduring check-world:\n\n$ make -s check-world >/dev/null\ndeclare.pgc:123: WARNING: connection \"con2\" is overwritten to \"con1\".\ndeclare.pgc:124: WARNING: connection \"con2\" is overwritten to \"con1\".\ndeclare.pgc:135: WARNING: connection \"con2\" is overwritten to \"con1\".\n\nPlease do something about that.\n\n(1) There should be no output to stderr in the tests. Why isn't this\nmessage being caught and redirected to the normal test output file?\n\n(2) This message is both unintelligible and grammatically incorrect.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 14 Aug 2021 23:08:44 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Sat, Aug 14, 2021 at 11:08:44PM -0400, Tom Lane wrote:\n> Please do something about that.\n> \n> (1) There should be no output to stderr in the tests. 
Why isn't this\n> message being caught and redirected to the normal test output file?\n\nThese are generated during the compilation of the tests with the\npre-processor, so that's outside the test runs.\n\n> (2) This message is both unintelligible and grammatically incorrect.\n\nYeah, debugging such tests would be more helpful if the name of the\nDECLARE statement is included, at least. Those messages being\ngenerated is not normal anyway, which is something coming from the\ntests as a typo with the connection name of stmt_3.\n\nMichael, what do you think about the attached?\n--\nMichael", "msg_date": "Mon, 16 Aug 2021 15:27:26 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Wed, Aug 11, 2021 at 10:41:59PM +0200, Michael Meskes wrote:\n> I'm not sure I understand. Any usage of DECLARE STATEMENT makes the\n> file need the current version of ecpg anyway. On the other hand\n> DEALLOCATE did not change its behavior if no DECLARE STATEMENT was\n> issued, or what did I miss?\n\nYes, you are right here. I went through the code again and noticed by\nmistake. Sorry for the noise.\n--\nMichael", "msg_date": "Mon, 16 Aug 2021 16:27:08 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "> > (1) There should be no output to stderr in the tests.  Why isn't\n> > this\n> > message being caught and redirected to the normal test output file?\n> \n> These are generated during the compilation of the tests with the\n> pre-processor, so that's outside the test runs.\n\nThis is actually a deeper issue, we have no test for the compiler\nitself, other than the source code it generates. We do not test\nwarnings or errors thrown by it. 
The topic has come up ages ago and we\nsimply removed the test that generated the (planned) warning message.\n\n> > (2) This message is both unintelligible and grammatically\n> > incorrect.\n> \n> Yeah, debugging such tests would be more helpful if the name of the\n> DECLARE statement is included, at least.  Those messages being\n> generated is not normal anyway, which is something coming from the\n> tests as a typo with the connection name of stmt_3.\n> \n> Michael, what do you think about the attached?\n\nI think what Tom was saying is that it should be either \"is overwritten\nwith\" or \"is rewritten to\", but you raise a very good point. Adding the\nstatement name makes the message better. I fully agree. However, it\nshould be the other way round, the DECLARE STATEMENT changes the\nconnection that is used. \n\nYou patch removes the warning but by doing that also removes the\nfeature that is being tested.\n\nI'm not sure what's the best way to go about it, Shall we accept to not\ntest this particular feature and remove the warning? After all this is\nnot the way the statement should be used, hence the warning. Or should\nbe keep it in and redirect the warning? In that case, we would also\nlose other warnings that are not planned, though.\n\nAny comments?\n\nMichael\n-- \nMichael Meskes\nMichael at Fam-Meskes dot De\nMichael at Meskes dot (De|Com|Net|Org)\nMeskes at (Debian|Postgresql) dot Org", "msg_date": "Mon, 16 Aug 2021 12:06:16 +0200", "msg_from": "Michael Meskes <meskes@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Mon, Aug 16, 2021 at 12:06:16PM +0200, Michael Meskes wrote:\n> You patch removes the warning but by doing that also removes the\n> feature that is being tested.\n\nOops. 
If kept this way, this test scenario is going to need a comment\nto explain exactly that.\n\n> I'm not sure what's the best way to go about it, Shall we accept to not\n> test this particular feature and remove the warning? After all this is\n> not the way the statement should be used, hence the warning. Or should\n> be keep it in and redirect the warning? In that case, we would also\n> lose other warnings that are not planned, though.\n\nFWIW, I would tend to drop the warning here. I am not sure that this\nis a use case interesting enough. My 2c.\n--\nMichael", "msg_date": "Tue, 17 Aug 2021 15:34:28 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Tue, Aug 17, 2021 at 03:34:28PM +0900, Michael Paquier wrote:\n> On Mon, Aug 16, 2021 at 12:06:16PM +0200, Michael Meskes wrote:\n>> You patch removes the warning but by doing that also removes the\n>> feature that is being tested.\n> \n> Oops. If kept this way, this test scenario is going to need a comment\n> to explain exactly that.\n\nMichael has adjusted that as of f576de1, so I am closing this open\nitem.\n--\nMichael", "msg_date": "Wed, 18 Aug 2021 10:17:37 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Hi,\n\nSorry for being late. I had a vaccination.\n\nI'm not sure about the rule that stderr should be removed\neven if the pre-compiling state, but anyway I agree that\nthe warned case is not expected.\nThe wrong message is perfectly fault...\n\nI confirmed your commit and I think it's OK. 
Thanks!\n\nBest Regards,\nHayato Kuroda\nFUJITSU LIMITED\n\n\n\n", "msg_date": "Thu, 19 Aug 2021 11:27:17 +0000", "msg_from": "\"kuroda.hayato@fujitsu.com\" <kuroda.hayato@fujitsu.com>", "msg_from_op": true, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "Hi, \n\nI find in ecpg.header, the source:\n>\t\t\tif (connection)\n>\t\t\tif (connection && strcmp(ptr->connection, connection) != 0)\n\nThe first if statement is useless. And in fix-ecpg-tests.patch:\n>-\t\t\tif (connection)\n>-\t\t\t\tmmerror(PARSE_ERROR, ET_WARNING, \"connection %s is overwritten to %s.\", connection, ptr->connection);\n>+\t\t\tif (connection && strcmp(ptr->connection, connection) != 0)\n>+\t\t\t\tmmerror(PARSE_ERROR, ET_WARNING, \"declare statement %s using connection %s overwritten to connection %s.\",\n>+\t\t\t\t\tname, connection, ptr->connection);\nThe patch seems right.\n\nDelete first if statement, patch attached.\n\nRegards,\nShenhao Wang", "msg_date": "Wed, 25 Aug 2021 05:10:57 +0000", "msg_from": "\"wangsh.fnst@fujitsu.com\" <wangsh.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" }, { "msg_contents": "On Wed, Aug 25, 2021 at 05:10:57AM +0000, wangsh.fnst@fujitsu.com wrote:\n> Delete first if statement, patch attached.\n\nIndeed, this looks like a mismerge. I'll apply that in a bit.\nFunnily, Coverity did not mention that.\n--\nMichael", "msg_date": "Wed, 25 Aug 2021 14:34:21 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG bug fix: DECALRE STATEMENT and DEALLOCATE, DESCRIBE" } ]
[ { "msg_contents": "Hi all,\n\nWhile digging into some of the TAP tests, I have noticed that\n002_pgbench_no_server.pl prints array pointers, like that:\nopts=-f no-such-file, stat=1, out=ARRAY(0x1374d7990),\nerr=ARRAY(0x14028dc40), name=pgbench option error: no file# Running:\npgbench -f no-such-file\n\nI am a bit dubious that this information is useful when it comes to\ndebugging because we have the name of the tests close by, so I would\njust remove those extra logs. If people prefer keeping this\ninformation around, we could fix the format with something like the\nattached, for example.\n\nThoughts?\n--\nMichael", "msg_date": "Fri, 25 Jun 2021 21:33:25 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Some incorrect logs in TAP tests of pgbench" }, { "msg_contents": "\nOn 6/25/21 8:33 AM, Michael Paquier wrote:\n> Hi all,\n>\n> While digging into some of the TAP tests, I have noticed that\n> 002_pgbench_no_server.pl prints array pointers, like that:\n> opts=-f no-such-file, stat=1, out=ARRAY(0x1374d7990),\n> err=ARRAY(0x14028dc40), name=pgbench option error: no file# Running:\n> pgbench -f no-such-file\n>\n> I am a bit dubious that this information is useful when it comes to\n> debugging because we have the name of the tests close by, so I would\n> just remove those extra logs. If people prefer keeping this\n> information around, we could fix the format with something like the\n> attached, for example.\n>\n> Thoughts?\n\n\nEither that or dereference them, by printing @$out and @$err instead of\n$out and $err or something similar.\n\nBut probably the name of the test is sufficient. 
(What were we thinking\nin allowing this in the first place?)\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Fri, 25 Jun 2021 09:37:50 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: Some incorrect logs in TAP tests of pgbench" }, { "msg_contents": "On 2021-Jun-25, Michael Paquier wrote:\n\n> I am a bit dubious that this information is useful when it comes to\n> debugging because we have the name of the tests close by, so I would\n> just remove those extra logs. If people prefer keeping this\n> information around, we could fix the format with something like the\n> attached, for example.\n\nI agree it's not useful -- command_checks_all logs each element of those\narrays already, when doing its like(). So ISTM we can do away with them.\n\n-- \n�lvaro Herrera 39�49'30\"S 73�17'W\n\"Most hackers will be perfectly comfortable conceptualizing users as entropy\n sources, so let's move on.\" (Nathaniel Smith)\n\n\n", "msg_date": "Fri, 25 Jun 2021 11:55:13 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Some incorrect logs in TAP tests of pgbench" }, { "msg_contents": "On Fri, Jun 25, 2021 at 09:37:50AM -0400, Andrew Dunstan wrote:\n> Either that or dereference them, by printing @$out and @$err instead of\n> $out and $err or something similar.\n\nLooking again, we don't really lose context if we remove that, so done\nthis way.\n\n> But probably the name of the test is sufficient. (What were we thinking\n> in allowing this in the first place?)\n\nNo idea. 
This got introduced in v5 of what got committed as of\ned8a7c6:\nhttps://www.postgresql.org/message-id/alpine.DEB.2.20.1705091641150.29373@lancre\n--\nMichael", "msg_date": "Sat, 26 Jun 2021 12:50:24 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Re: Some incorrect logs in TAP tests of pgbench" }, { "msg_contents": "> (What were we thinking in allowing this in the first place?)\n\nTemporary debug leftovers that got through, I'd say.\n\nThanks Micha�l for the clean up!\n\n-- \nFabien.", "msg_date": "Sat, 26 Jun 2021 07:42:07 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: Some incorrect logs in TAP tests of pgbench" } ]
[ { "msg_contents": "Hello,\n\nin src/tools/msvc/Solution.pm (in the current master) there is a \nleftover from the past:\n > confess \"Bad format of version: $self->{strver}\\n\";\n\nstrver has been gone since 8f4fb4c6 in 2019, so I suggest an obvious \none-line fix in the patch attached:\n\ndiff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm\nindex a7b8f720b55..fcb43b0ca05 100644\n--- a/src/tools/msvc/Solution.pm\n+++ b/src/tools/msvc/Solution.pm\n@@ -176,7 +176,7 @@ sub GenerateFiles\n\n \t\t\tif ($package_version !~ /^(\\d+)(?:\\.(\\d+))?/)\n \t\t\t{\n-\t\t\t\tconfess \"Bad format of version: $self->{strver}\\n\";\n+\t\t\t\tconfess \"Bad format of version: $package_version\\n\";\n \t\t\t}\n \t\t\t$majorver = sprintf(\"%d\", $1);\n \t\t\t$minorver = sprintf(\"%d\", $2 ? $2 : 0);\n\nI think this should be backported to REL_13_STABLE, but not to \nREL_12_STABLE and earlier, where strver was still present.\n\n-- \nAnton Voloshin,\nPostgres Professional, The Russian Postgres Company\nhttps://postgrespro.ru", "msg_date": "Sat, 26 Jun 2021 00:47:50 +0700", "msg_from": "Anton Voloshin <a.voloshin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "[patch] remove strver's leftover from error message in Solution.pm" }, { "msg_contents": "On Sat, Jun 26, 2021 at 12:47:50AM +0700, Anton Voloshin wrote:\n> I think this should be backported to REL_13_STABLE, but not to REL_12_STABLE\n> and earlier, where strver was still present.\n\nGood catch! I will take care of that.\n--\nMichael", "msg_date": "Sat, 26 Jun 2021 09:37:47 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [patch] remove strver's leftover from error message in\n Solution.pm" } ]
[ { "msg_contents": "Hello all,\n\nI am trying to add support for composite types to my ORM, which uses libpq \nand the binary format.\n\nGiven a schema like this one:\n\ncreate type composite as (...);\ncreate table sometable (field composite, ...);\n\nI want to execute a query like this:\n\nPQexecParams(\"insert into sometable values($1, ...);\", paramValues[0] = serialize some record, ...)\n\nHowever this fails in coerce_record_to_complex(), because it receives a \nnode of type Param, but it can only handle RowExpr and Var. There is a \ncomment suggesting that this is not a fundamental limitation, but (not \nbeing familiar with postgres codebase) I'm not sure how to go about fixing \nit. I assume there is a mapping somewhere from param ids to objects, but \nam unable to find it.\n\nDoes anyone have any pointers or suggestions? Am I going about this in \nentirely the wrong way?\n\n -E\n\n\n", "msg_date": "Sat, 26 Jun 2021 00:10:39 -0700 (PDT)", "msg_from": "Elijah Stone <elronnd@elronnd.net>", "msg_from_op": true, "msg_subject": "Composite types as parameters" }, { "msg_contents": "Elijah Stone <elronnd@elronnd.net> writes:\n> I want to execute a query like this:\n\n> PQexecParams(\"insert into sometable values($1, ...);\", paramValues[0] = serialize some record, ...)\n\n> However this fails in coerce_record_to_complex(), because it receives a \n> node of type Param, but it can only handle RowExpr and Var.\n\nYou probably would have better results from specifying the composite\ntype explicitly in the query:\n\nPQexecParams(\"insert into sometable values($1::composite, ...);\",\n\nI gather from the complaint that you're currently doing something that\ncauses the Param to be typed as a generic \"record\", which is problematic\nsince the record's details are not available from anyplace. 
But if you\ncast it directly to a named composite type, that should work.\n\nIf it still doesn't work, please provide a more concrete example.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 26 Jun 2021 11:42:03 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Composite types as parameters" }, { "msg_contents": "On Sat, 26 Jun 2021, Tom Lane wrote:\n> You probably would have better results from specifying the composite \n> type explicitly in the query:\n>\n> PQexecParams(\"insert into sometable values($1::composite, ...);\",\n>\n> I gather from the complaint that you're currently doing something that \n> causes the Param to be typed as a generic \"record\", which is problematic \n> since the record's details are not available from anyplace. But if you \n> cast it directly to a named composite type, that should work.\n>\n> If it still doesn't work, please provide a more concrete example.\n\nThanks, unfortunately adding the explicit cast doesn't help. I've \nattached a minimal runnable example.\n\nI am serializing as a generic record, so it occurs to me that another \nsolution would be to use the actual type of the composite in question. \n(Though it also seems to me that my code should work as-is.) Is there a \nway to discover the OID of a composite type? And is the wire format the \nsame as for a generic record?\n\n -E", "msg_date": "Sat, 26 Jun 2021 19:01:47 -0700 (PDT)", "msg_from": "Elijah Stone <elronnd@elronnd.net>", "msg_from_op": true, "msg_subject": "Re: Composite types as parameters" }, { "msg_contents": "Elijah Stone <elronnd@elronnd.net> writes:\n> On Sat, 26 Jun 2021, Tom Lane wrote:\n>> If it still doesn't work, please provide a more concrete example.\n\n> Thanks, unfortunately adding the explicit cast doesn't help. I've \n> attached a minimal runnable example.\n\nSo your problem is that you're explicitly saying that the input is\nof generic-RECORD type. 
You should let the server infer its type,\ninstead, which it can easily do from context in this example.\nThat is, pass zero as the type OID, or leave out the paramTypes\narray altogether. The example works for me with this change:\n\n@@ -30,13 +30,13 @@\n \n // error:\n check(PQexecParams(c, \"INSERT INTO tab VALUES($1, 8);\",\n- 1, &(Oid){RECORDOID}, &(const char*){recbuf},\n+ 1, &(Oid){0}, &(const char*){recbuf},\n &(int){rec - recbuf}, &(int){1/*binary*/},\n 1/*binary result*/));\n \n // error as well:\n check(PQexecParams(c, \"INSERT INTO tab VALUES($1::some_record, 8);\",\n- 1, &(Oid){RECORDOID}, &(const char*){recbuf},\n+ 1, &(Oid){0}, &(const char*){recbuf},\n &(int){rec - recbuf}, &(int){1},\n 1));\n\nIn more complicated cases you might need to fetch the composite\ntype's actual OID and pass that. But I'd go with the lazy approach\nuntil forced to do differently.\n\n> Is there a \n> way to discover the OID of a composite type? And is the wire format the \n> same as for a generic record?\n\nSame as for any other type: SELECT 'mytypename'::regtype::oid.\nAnd yes.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 27 Jun 2021 11:26:54 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Composite types as parameters" }, { "msg_contents": "On Sun, 27 Jun 2021, Tom Lane wrote:\n\n> You should let the server infer its type, instead, which it can easily \n> do from context in this example. That is, pass zero as the type OID, or \n> leave out the paramTypes\n\nAh, thank you, that works brilliantly. Sorry for the noise!\n\n -E\n\n\n", "msg_date": "Mon, 28 Jun 2021 12:55:33 -0700 (PDT)", "msg_from": "Elijah Stone <elronnd@elronnd.net>", "msg_from_op": true, "msg_subject": "Re: Composite types as parameters" } ]
[ { "msg_contents": "Hi,\n\nif a failover (or probably a switchover, at least in the way Patroni\ndoes it) occurs, the timeline history (e.g. via \"patronictl history\"[1])\nseems to read \"no recovery target specified\". That's correct, of course,\nfrom a PITR perspective, but for the (possibly more common?) promotion-\nof-a-standby-due-to-failover/switchover case rather misleading.\n\nI wonder whether it could be made more informative; like \"no recovery\ntarget or failover\" or \"(standby) promotion witout recovery target\"?\n\n\nMichael\n\n[1]\n\nroot@pg1:~# patronictl -c /etc/patroni/13-main.yml history | head\n+----+----------+------------------------------+----------------------------------+\n| TL | LSN | Reason | Timestamp |\n+----+----------+------------------------------+----------------------------------+\n| 1 | 83886296 | no recovery target specified | 2021-06-18T20:04:11.645437+00:00 |\n| 2 | 83886928 | no recovery target specified | 2021-06-18T20:08:45.820304+00:00 |\n| 3 | 83887384 | no recovery target specified | 2021-06-19T05:57:50.431980+00:00 |\n| 4 | 83887840 | no recovery target specified | 2021-06-19T08:32:55.527975+00:00 |\n| 5 | 84017040 | no recovery target specified | 2021-06-19T12:05:40.495982+00:00 |\n| 6 | 84019264 | no recovery target specified | 2021-06-19T15:51:49.983987+00:00 |\n| 7 | 84135720 | no recovery target specified | 2021-06-20T03:46:22.775851+00:00 |\n\n-- \nMichael Banck\nProjektleiter / Senior Berater\nTel.: +49 2166 9901-171\nFax: +49 2166 9901-100\nEmail: \nmichael.banck@credativ.de\ncredativ GmbH, HRB Mönchengladbach 12080\nUSt-ID-Nummer: DE204566209\nTrompeterallee 108, 41189 Mönchengladbach\nGeschäftsführung: Dr. 
Michael Meskes, Sascha Heuer\n\nUnser Umgang mit personenbezogenen Daten unterliegt\nfolgenden Bestimmungen: \nhttps://www.credativ.de/datenschutz\n\n\n\n", "msg_date": "Sat, 26 Jun 2021 10:17:52 +0200", "msg_from": "Michael Banck <michael.banck@credativ.de>", "msg_from_op": true, "msg_subject": "Failover messages in Timeline History" } ]
[ { "msg_contents": "[ starting a new thread so as not to confuse the cfbot ]\n\nI wrote:\n> Michael Paquier <michael@paquier.xyz> writes:\n>> Good point. That's worse than just pfree() which is just a plain call\n>> to free() in the frontend. We could have more policies here, but my\n>> take is that we'd better move fe_memutils.o to OBJS_FRONTEND in\n>> src/common/Makefile so as shared libraries don't use those routines in\n>> the long term.\n\n> Ugh. Not only is that bad, but your proposed fix doesn't fix it.\n> At least in psql, and probably in most/all of our other clients,\n> removing fe_memutils.o from libpq's link just causes it to start\n> relying on the copy in the psql executable :-(. So I agree that\n> some sort of mechanical enforcement would be a really good thing,\n> but I'm not sure what it would look like.\n\nAfter some thought I propose that what we really want is to prevent\nany calls of abort() or exit() from inside libpq. Attached is a\ndraft patch to do that. This can't be committed as-is, because\nwe still have some abort() calls in there in HEAD, but if we could\nget that cleaned up it'd work. Alternatively we could just disallow\nexit(), which'd be enough to catch the problematic src/common files.\n\nThis relies on \"nm\" being able to work on shlibs, which it's not\nrequired to by POSIX. However, it seems to behave as desired even\non my oldest dinosaurs. 
In any case, if \"nm\" doesn't work then\nwe'll just not detect such problems on that platform, which should\nbe OK as long as the test does work on common platforms.\nOther than that point I think it's relying only on POSIX-spec\nfeatures.\n\nI'll stick this into the CF list to see if the cfbot agrees that\nit finds the abort() problems...\n\n\t\t\tregards, tom lane", "msg_date": "Sat, 26 Jun 2021 17:29:29 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Sat, Jun 26, 2021 at 05:29:29PM -0400, Tom Lane wrote:\n> I'll stick this into the CF list to see if the cfbot agrees that\n> it finds the abort() problems...\n\nThe CF Bot is finding those problems.\n\n> +# Check for functions that libpq must not call.\n> +# (If nm doesn't exist or doesn't work on shlibs, this test will silently\n> +# do nothing, which is fine.)\n> +.PHONY: check-libpq-refs\n> +check-libpq-refs: $(shlib)\n> +\t@! nm -A -g -u $< 2>/dev/null | grep -e abort -e exit\n\n\"abort\" and \"exit\" could be generic terms present in some other\nlibraries. Could be be better to match with \"U abort\" and \"U exit\"\ninstead? MinGW has a nm command, and it has a compatible option set,\nso I think that it should work.\n--\nMichael", "msg_date": "Mon, 28 Jun 2021 16:52:12 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "\n>> +# Check for functions that libpq must not call.\n>> +# (If nm doesn't exist or doesn't work on shlibs, this test will silently\n>> +# do nothing, which is fine.)\n>> +.PHONY: check-libpq-refs\n>> +check-libpq-refs: $(shlib)\n>> +\t@! nm -A -g -u $< 2>/dev/null | grep -e abort -e exit\n>\n> \"abort\" and \"exit\" could be generic terms present in some other\n> libraries. Could be be better to match with \"U abort\" and \"U exit\"\n> instead? 
MinGW has a nm command, and it has a compatible option set,\n> so I think that it should work.\n\nA possible trick is to add ccp flags such as: -Dexit=exit_BAD \n-Dabort=abort_BAD.\n\n-- \nFabien.\n\n\n", "msg_date": "Mon, 28 Jun 2021 10:20:52 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Michael Paquier <michael@paquier.xyz> writes:\n> On Sat, Jun 26, 2021 at 05:29:29PM -0400, Tom Lane wrote:\n>> I'll stick this into the CF list to see if the cfbot agrees that\n>> it finds the abort() problems...\n\n> The CF Bot is finding those problems.\n\n>> +# Check for functions that libpq must not call.\n>> +# (If nm doesn't exist or doesn't work on shlibs, this test will silently\n>> +# do nothing, which is fine.)\n>> +.PHONY: check-libpq-refs\n>> +check-libpq-refs: $(shlib)\n>> +\t@! nm -A -g -u $< 2>/dev/null | grep -e abort -e exit\n\nYeah, all except on Windows. Not sure if it's worth trying to build\nsome way to make this check on Windows.\n\n> \"abort\" and \"exit\" could be generic terms present in some other\n> libraries. Could be be better to match with \"U abort\" and \"U exit\"\n> instead?\n\nNo, for a couple of reasons:\n\n* nm's output format isn't all that well standardized\n\n* on some platforms, what appears here is \"_abort\".\n\nI would have liked to use \"-w\" in the grep call, but between the\n\"_abort\" case and the \"abort@@GLIBC\" case we see elsewhere, we'd\nbe assuming way too much about what grep will consider to be a word.\n\nIn practice I don't think it's too much of a problem. It doesn't\nmatter whether libc has exported names containing \"exit\", unless\nlibpq or something it imports from src/common or src/port actually\nattempts to call those names. Which I'm not expecting.\n\nA possible counterexample is atexit(3). If libpq ever grew a\nreason to call that then we'd have an issue. 
It wouldn't be\nthat hard to work around, by adding a grep -v filter. But in\nany case I'm dubious that we could ever make correct use of\natexit(3) in libpq, because we'd have no way to know whether\nthe host application has its own atexit callbacks and if so\nwhether they'll run before or after libpq's. Something like\nisolationtester's atexit callback to PQclose all its connections\nwould risk breakage if libpq tried to clean up via atexit.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 09:47:59 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Fabien COELHO <coelho@cri.ensmp.fr> writes:\n> A possible trick is to add ccp flags such as: -Dexit=exit_BAD \n> -Dabort=abort_BAD.\n\nNot really going to work, at least not without a lot of fragile\nkluges, because the main problem here is to prevent libpq from\n*indirectly* calling those functions via stuff it imports from\nsrc/port or src/common.\n\nIt's possible that we could make it work by generalizing the\npolicy that \"libpq may not call abort/exit\" into \"no PG shlib\nmay call abort/exit\", and then apply the cpp #defines while\ncompiling the xxx_shlib.o variants of those files. This does\nnot seem more attractive than what I proposed, though.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 09:54:16 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "I wrote:\n> This relies on \"nm\" being able to work on shlibs, which it's not\n> required to by POSIX. However, it seems to behave as desired even\n> on my oldest dinosaurs. 
In any case, if \"nm\" doesn't work then\n> we'll just not detect such problems on that platform, which should\n> be OK as long as the test does work on common platforms.\n> Other than that point I think it's relying only on POSIX-spec\n> features.\n\nFurther dinosaur-wrangling reveals a small problem on prairiedog\n(ancient macOS):\n\n$ nm -A -g -u libpq.5.14.dylib | grep abort\nlibpq.5.14.dylib:fe-connect.o: _abort\nlibpq.5.14.dylib:_eprintf.o: _abort\n\nThe fe-connect.o reference is from PGTHREAD_ERROR of course,\nbut what's that other thing? Investigation finds this:\n\nhttps://opensource.apple.com/source/clang/clang-800.0.38/src/projects/compiler-rt/lib/builtins/eprintf.c.auto.html\n\nIOW it seems that this file is pulled in to implement <assert.h>,\nand the abort call underlies uses of Assert. So that seems fine\nfrom a coding-rule perspective: it's okay for development builds\nto contain core-dumping assertions. It complicates matters for\nthe proposed patch though.\n\nAs far as old macOS goes, it seems like we can work around this\npretty easily, since this version of nm helpfully breaks down\nthe references by .o file: just add a \"grep -v\" pass to reject\n\"_eprintf.o:\". However, if there are any other platforms that\nsimilarly convert assert() calls into some direct reference\nto abort(), it may be harder to work around it elsewhere.\nI guess the only way to know is to see what the buildfarm\nsays.\n\nWorst case, we might only be able to enforce the prohibition\nagainst exit(). That'd be annoying but it's still much better\nthan nothing.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 16:34:35 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "I wrote:\n>> This relies on \"nm\" being able to work on shlibs, which it's not\n>> required to by POSIX. However, it seems to behave as desired even\n>> on my oldest dinosaurs. 
In any case, if \"nm\" doesn't work then\n>> we'll just not detect such problems on that platform, which should\n>> be OK as long as the test does work on common platforms.\n\nSo I pushed that, and not very surprisingly, it's run into some\nportability problems. gombessa (recent OpenBSD) reports\n\n! nm -A -g -u libpq.so.5.15 2>/dev/null | grep -v '_eprintf\\\\.o:' | grep -e abort -e exit\nlibpq.so.5.15:__cxa_atexit\n\nSo far as I can find, __cxa_atexit is a C++ support routine, so\nI wondered what the heck libpq.so is doing calling it. I managed\nto reproduce the failure here using an OpenBSD installation I had\nat hand, and confirmed that __cxa_atexit is *not* referenced by any\nof the .o files in src/port, src/common, or src/interfaces/libpq.\nSo apparently it's being injected at some fairly low level of the\nshlib support on that platform.\n\nProbably the thing to do is adjust the grep filter to exclude\n__cxa_atexit, but I want to wait awhile and see whether any\nother buildfarm animals report this. morepork at least will\nbe pretty interesting, since it's a slightly older OpenBSD\nvintage.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 29 Jun 2021 13:15:30 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "I wrote:\n> So I pushed that, and not very surprisingly, it's run into some\n> portability problems. gombessa (recent OpenBSD) reports\n\n> ! nm -A -g -u libpq.so.5.15 2>/dev/null | grep -v '_eprintf\\\\.o:' | grep -e abort -e exit\n> libpq.so.5.15:__cxa_atexit\n\nAfter a few more hours, all of our OpenBSD animals have reported\nthat, on several different OpenBSD releases and with both gcc\nand clang compilers. So at least it's a longstanding platform\nbehavior.\n\nMore troublingly, fossa reports this:\n\n! 
nm -A -g -u libpq.so.5.15 2>/dev/null | grep -v '_eprintf\\\\.o:' | grep -e abort -e exit\nlibpq.so.5.15: U abort@@GLIBC_2.2.5\n\nWhere is that coming from? hippopotamus and jay, which seem to\nbe different compilers on the same physical machine, aren't showing\nit. That'd lead to the conclusion that icc is injecting abort()\ncalls of its own accord, which seems quite nasty. Lacking an icc\nlicense, I can't poke into that more directly here.\n\nPerhaps we could wrap the test for abort() in something like\n'if \"$CC\" != icc then ...', but ugh.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 29 Jun 2021 16:39:28 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 2021-Jun-29, Tom Lane wrote:\n\n> More troublingly, fossa reports this:\n> \n> ! nm -A -g -u libpq.so.5.15 2>/dev/null | grep -v '_eprintf\\\\.o:' | grep -e abort -e exit\n> libpq.so.5.15: U abort@@GLIBC_2.2.5\n> \n> Where is that coming from? hippopotamus and jay, which seem to\n> be different compilers on the same physical machine, aren't showing\n> it. That'd lead to the conclusion that icc is injecting abort()\n> calls of its own accord, which seems quite nasty. Lacking an icc\n> license, I can't poke into that more directly here.\n\nI noticed that the coverage report is not updating, and lo and behold\nit's failing this bit.\n\nI can inspect the built files ... 
what exactly are you looking for?\n\n-- \n�lvaro Herrera Valdivia, Chile\n<Schwern> It does it in a really, really complicated way\n<crab> why does it need to be complicated?\n<Schwern> Because it's MakeMaker.\n\n\n", "msg_date": "Tue, 29 Jun 2021 19:46:35 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Ah, I nm'd all files in src/interfaces/libpq and got no hits for abort.\nBut I did get one in libpgport_shlib.a:\n\npath_shlib.o:\n U abort\n\t\t 0000000000000320 T canonicalize_path\n\t\t 0000000000000197 T cleanup_path\n\t\t 00000000000009e3 t dir_strcmp\n\t\t ...\n\n-- \n�lvaro Herrera Valdivia, Chile\n\"People get annoyed when you try to debug them.\" (Larry Wall)\n\n\n", "msg_date": "Tue, 29 Jun 2021 19:57:05 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> Ah, I nm'd all files in src/interfaces/libpq and got no hits for abort.\n> But I did get one in libpgport_shlib.a:\n\n> path_shlib.o:\n> U abort\n\nYeah, there is one in get_progname(). But path.o shouldn't be getting\npulled into libpq ... else why aren't all the animals failing?\n\nWhat platform does the coverage report run on exactly?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 29 Jun 2021 22:59:23 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 2021-Jun-29, Tom Lane wrote:\n\n> Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> > Ah, I nm'd all files in src/interfaces/libpq and got no hits for abort.\n> > But I did get one in libpgport_shlib.a:\n> \n> > path_shlib.o:\n> > U abort\n> \n> Yeah, there is one in get_progname(). But path.o shouldn't be getting\n> pulled into libpq ... 
else why aren't all the animals failing?\n\nMaybe there's something about the linker flags being used.\n\n... ah yeah, if I configure with coverage enabled on my machine, it fails in the same way.\n\n> What platform does the coverage report run on exactly?\n\nIt's Debian Buster.\n\nlibpq.so is linked as\n\ngcc -Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Werror=vla -Wendif-labels -Wmissing-format-attribute -Wimplicit-fallthrough=3 -Wcast-function-type -Wformat-security -fno-strict-aliasing -fwrapv -fexcess-precision=standard -Wno-format-truncation -Wno-stringop-truncation -fprofile-arcs -ftest-coverage -O0 -pthread -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS -fPIC -shared -Wl,-soname,libpq.so.5 -Wl,--version-script=exports.list -o libpq.so.5.15 fe-auth-scram.o fe-connect.o fe-exec.o fe-lobj.o fe-misc.o fe-print.o fe-protocol3.o fe-secure.o fe-trace.o legacy-pqsignal.o libpq-events.o pqexpbuffer.o fe-auth.o fe-secure-common.o fe-secure-openssl.o -L../../../src/port -L../../../src/common -lpgcommon_shlib -lpgport_shlib -L/usr/lib/llvm-6.0/lib -Wl,--as-needed -Wl,-rpath,'/usr/local/pgsql/lib',--enable-new-dtags -lssl -lcrypto -lm -lldap_r \n\nand libpgport was just\n\nar crs libpgport_shlib.a fls_shlib.o getpeereid_shlib.o strlcat_shlib.o strlcpy_shlib.o pg_crc32c_sse42_shlib.o pg_crc32c_sb8_shlib.o pg_crc32c_sse42_choose_shlib.o bsearch_arg_shlib.o chklocale_shlib.o erand48_shlib.o inet_net_ntop_shlib.o noblock_shlib.o path_shlib.o pg_bitutils_shlib.o pg_strong_random_shlib.o pgcheckdir_shlib.o pgmkdirp_shlib.o pgsleep_shlib.o pgstrcasecmp_shlib.o pgstrsignal_shlib.o pqsignal_shlib.o qsort_shlib.o qsort_arg_shlib.o quotes_shlib.o snprintf_shlib.o strerror_shlib.o tar_shlib.o thread_shlib.o\n\n-- \n�lvaro Herrera Valdivia, Chile\n https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 30 Jun 2021 08:58:31 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() 
and exit() calls in libpq" }, { "msg_contents": "On 2021-Jun-30, Alvaro Herrera wrote:\n\n> On 2021-Jun-29, Tom Lane wrote:\n> \n> > Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> > > Ah, I nm'd all files in src/interfaces/libpq and got no hits for abort.\n> > > But I did get one in libpgport_shlib.a:\n> > \n> > > path_shlib.o:\n> > > U abort\n> > \n> > Yeah, there is one in get_progname(). But path.o shouldn't be getting\n> > pulled into libpq ... else why aren't all the animals failing?\n> \n> Maybe there's something about the linker flags being used.\n> \n> ... ah yeah, if I configure with coverage enabled on my machine, it fails in the same way.\n\nIf I remove -fprofile-arcs from CFLAGS, then abort is no longer present,\nbut we still get a fail because of __gcov_exit. I suppose if you'd add\nan exception for __cxa_atexit, the same place could use one for\n__gcov_exit.\n\nI'm not sure what to make of the -fprofile-arcs stuff though.\n\n-- \n�lvaro Herrera Valdivia, Chile\n https://www.EnterpriseDB.com/\n\"Java is clearly an example of money oriented programming\" (A. Stepanov)\n\n\n", "msg_date": "Wed, 30 Jun 2021 09:10:06 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 2021-Jun-30, Alvaro Herrera wrote:\n\n> If I remove -fprofile-arcs from CFLAGS, then abort is no longer present,\n> but we still get a fail because of __gcov_exit. 
I suppose if you'd add\n> an exception for __cxa_atexit, the same place could use one for\n> __gcov_exit.\n\nI tried the attached patch, and while libpq.so now builds successfully,\nit causes anything that tries to link to libpq fail like\n\ngcc -Wall -Wmissing-prototypes -Wpointer-arith -Wdeclaration-after-statement -Werror=vla -Wendif-labels -Wmissing-format-attribute -Wimplicit-fallthrough=3 -Wcast-function-type -Wformat-security -fno-strict-aliasing -fwrapv -fexcess-precision=standard -Wno-format-truncation -Wno-stringop-truncation -g -fprofile-arcs -ftest-coverage findtimezone.o initdb.o localtime.o -L../../../src/port -L../../../src/common -L../../../src/fe_utils -lpgfeutils -L../../../src/common -lpgcommon -L../../../src/port -lpgport -L../../../src/interfaces/libpq -lpq -Wl,--as-needed -Wl,-rpath,'/pgsql/install/master-coverage/lib',--enable-new-dtags -lpgcommon -lpgport -lpthread -lxml2 -lssl -lcrypto -lz -lreadline -lpthread -lrt -ldl -lm -o initdb\n/usr/bin/ld: initdb: hidden symbol `__gcov_merge_add' in /usr/lib/gcc/x86_64-linux-gnu/8/libgcov.a(_gcov_merge_add.o) is referenced by DSO\n/usr/bin/ld: final link failed: bad value\ncollect2: error: ld returned 1 exit status\nmake[3]: *** [Makefile:43: initdb] Error 1\n\nso this doesn't look too promising.\n\n-- \n�lvaro Herrera Valdivia, Chile\n https://www.EnterpriseDB.com/", "msg_date": "Wed, 30 Jun 2021 09:46:59 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> Maybe there's something about the linker flags being used.\n> ... ah yeah, if I configure with coverage enabled on my machine, it fails in the same way.\n\nAh-hah, yeah, I see it too if I enable profiling. I can confirm\nthat it's not from the abort() call in path.c, because it's still\nthere if I remove that. 
So this is another case where build\ninfrastructure is injecting abort() calls we didn't ask for.\n\nBetween this and the icc case, I'm now inclined to give up on\ntrying to forbid abort() calls in libpq. I think the value-add\nfor that is a lot lower than it is for exit() anyway. abort()\nis something one doesn't toss around lightly.\n\nYou mentioned __gcov_exit, but I'm not sure if we need an\nexception for that. I see it referenced by the individual .o\nfiles, but the completed .so has no such reference, so at least\non RHEL8 it's apparently satisfied during .so linkage. Do you\nsee something different?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 10:09:32 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 2021-Jun-30, Tom Lane wrote:\n\n> Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> > Maybe there's something about the linker flags being used.\n> > ... ah yeah, if I configure with coverage enabled on my machine, it fails in the same way.\n> \n> Ah-hah, yeah, I see it too if I enable profiling. I can confirm\n> that it's not from the abort() call in path.c, because it's still\n> there if I remove that. So this is another case where build\n> infrastructure is injecting abort() calls we didn't ask for.\n\nHah, I didn't think to try that.\n\n> Between this and the icc case, I'm now inclined to give up on\n> trying to forbid abort() calls in libpq. I think the value-add\n> for that is a lot lower than it is for exit() anyway. abort()\n> is something one doesn't toss around lightly.\n\nNo objections to that.\n\n> You mentioned __gcov_exit, but I'm not sure if we need an\n> exception for that. I see it referenced by the individual .o\n> files, but the completed .so has no such reference, so at least\n> on RHEL8 it's apparently satisfied during .so linkage. Do you\n> see something different?\n\nWell, not really. 
I saw it but only after I removed -fprofile-arcs from\nMakefile.shlib's link line; but per my other email, that doesn't really\nwork.\n\nEverything seems to work well for me after removing abort from that grep.\n\n-- \n�lvaro Herrera Valdivia, Chile\n https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 30 Jun 2021 10:39:29 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> On 2021-Jun-30, Tom Lane wrote:\n>> You mentioned __gcov_exit, but I'm not sure if we need an\n>> exception for that. I see it referenced by the individual .o\n>> files, but the completed .so has no such reference, so at least\n>> on RHEL8 it's apparently satisfied during .so linkage. Do you\n>> see something different?\n\n> Well, not really. I saw it but only after I removed -fprofile-arcs from\n> Makefile.shlib's link line; but per my other email, that doesn't really\n> work.\n> Everything seems to work well for me after removing abort from that grep.\n\nOK, thanks, will push a fix momentarily.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 10:42:55 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 2021-Jun-30, Tom Lane wrote:\n\n> OK, thanks, will push a fix momentarily.\n\n(BTW since the _eprintf.o stuff comes from _abort, I suppose you're\ngoing to remove that grep -v too?)\n\n-- \n�lvaro Herrera 39�49'30\"S 73�17'W\n https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 30 Jun 2021 10:50:01 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> (BTW since the _eprintf.o stuff comes from _abort, I suppose you're\n> 
going to remove that grep -v too?)\n\nRight, I did that.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 10:53:05 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "I wrote:\n> OK, thanks, will push a fix momentarily.\n\nDid so, and look what popped up on wrasse [1]:\n\n! nm -A -g -u libpq.so.5.15 2>/dev/null | grep -v __cxa_atexit | grep exit\nlibpq.so.5.15: [765]\t| 232544| 248|FUNC |GLOB |3 |14 |PQexitPipelineMode\n\nThis makes no sense, because (a) wrasse was happy with the previous\nversion, and (b) surely the \"-u\" switch should prevent nm from\nprinting PQexitPipelineMode. Noah, did you change anything about\nwrasse's configuration today?\n\n\t\t\tregards, tom lane\n\n[1] https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=wrasse&dt=2021-06-30%2014%3A58%3A15\n\n\n", "msg_date": "Wed, 30 Jun 2021 12:06:47 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 26.06.21 23:29, Tom Lane wrote:\n> After some thought I propose that what we really want is to prevent\n> any calls of abort() or exit() from inside libpq. Attached is a\n> draft patch to do that.\n\nCould we set this rule up a little bit differently so that it is only \nrun when the library is built.\n\nRight now, make world on a built tree makes 17 calls to this \"nm\" line, \nand make check-world calls it 81 times. I think once would be enough. 
;-)\n\n\n", "msg_date": "Wed, 30 Jun 2021 19:24:26 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n> Could we set this rule up a little bit differently so that it is only \n> run when the library is built.\n> Right now, make world on a built tree makes 17 calls to this \"nm\" line, \n> and make check-world calls it 81 times. I think once would be enough. ;-)\n\nHmm, didn't realize that would happen. Will see what can be done.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 16:15:14 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "I wrote:\n> Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n>> Could we set this rule up a little bit differently so that it is only \n>> run when the library is built.\n>> Right now, make world on a built tree makes 17 calls to this \"nm\" line, \n>> and make check-world calls it 81 times. I think once would be enough. ;-)\n\n> Hmm, didn't realize that would happen. Will see what can be done.\n\nLooks like we'd have to make use of a dummy stamp-file, more or less\nas attached. Any objections?\n\n\t\t\tregards, tom lane", "msg_date": "Wed, 30 Jun 2021 18:29:11 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Wed, 2021-06-30 at 18:29 -0400, Tom Lane wrote:\r\n> I wrote:\r\n> > Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\r\n> > > Could we set this rule up a little bit differently so that it is only \r\n> > > run when the library is built.\r\n> > > Right now, make world on a built tree makes 17 calls to this \"nm\" line, \r\n> > > and make check-world calls it 81 times. 
I think once would be enough. ;-)\r\n> > Hmm, didn't realize that would happen. Will see what can be done.\r\n> \r\n> Looks like we'd have to make use of a dummy stamp-file, more or less\r\n> as attached. Any objections?\r\n\r\nSpitballing -- if you don't like the stamp file, you could add the\r\ncheck to the end of the $(shlib) rule, surrounded by an ifeq check.\r\nThen .DELETE_ON_ERROR should take care of the rest, I think.\r\n\r\n--Jacob\r\n", "msg_date": "Wed, 30 Jun 2021 22:41:01 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Jacob Champion <pchampion@vmware.com> writes:\n> On Wed, 2021-06-30 at 18:29 -0400, Tom Lane wrote:\n>> Looks like we'd have to make use of a dummy stamp-file, more or less\n>> as attached. Any objections?\n\n> Spitballing -- if you don't like the stamp file, you could add the\n> check to the end of the $(shlib) rule, surrounded by an ifeq check.\n> Then .DELETE_ON_ERROR should take care of the rest, I think.\n\nHmm ... I'd been thinking we don't use .DELETE_ON_ERROR, but on\nsecond look we do, so that could be a plausible approach.\n\nOn balance though, the separate rule seems better, because\n.DELETE_ON_ERROR would destroy the evidence about why \"nm\"\nfailed, which could be annoying when investigating problems.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 18:56:56 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Wed, 2021-06-30 at 18:56 -0400, Tom Lane wrote:\r\n> Jacob Champion <pchampion@vmware.com> writes:\r\n> > On Wed, 2021-06-30 at 18:29 -0400, Tom Lane wrote:\r\n> > > Looks like we'd have to make use of a dummy stamp-file, more or less\r\n> > > as attached. 
Any objections?\r\n> > Spitballing -- if you don't like the stamp file, you could add the\r\n> > check to the end of the $(shlib) rule, surrounded by an ifeq check.\r\n> > Then .DELETE_ON_ERROR should take care of the rest, I think.\r\n> \r\n> Hmm ... I'd been thinking we don't use .DELETE_ON_ERROR, but on\r\n> second look we do, so that could be a plausible approach.\r\n> \r\n> On balance though, the separate rule seems better, because\r\n> .DELETE_ON_ERROR would destroy the evidence about why \"nm\"\r\n> failed, which could be annoying when investigating problems.\r\n\r\nGood point. +1 to the stamp approach, then.\r\n\r\n--Jacob\r\n", "msg_date": "Wed, 30 Jun 2021 22:58:22 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Wed, Jun 30, 2021 at 12:06:47PM -0400, Tom Lane wrote:\n> I wrote:\n> > OK, thanks, will push a fix momentarily.\n> \n> Did so, and look what popped up on wrasse [1]:\n> \n> ! nm -A -g -u libpq.so.5.15 2>/dev/null | grep -v __cxa_atexit | grep exit\n> libpq.so.5.15: [765]\t| 232544| 248|FUNC |GLOB |3 |14 |PQexitPipelineMode\n> \n> This makes no sense, because (a) wrasse was happy with the previous\n> version, and (b) surely the \"-u\" switch should prevent nm from\n> printing PQexitPipelineMode. Noah, did you change anything about\n> wrasse's configuration today?\n\nNo, and wrasse still succeeds at \"git checkout e45b0df^\". Solaris\n/usr/bin/grep doesn't support \"-e\":\n\n[nm@gcc-solaris11 5:0 2021-06-30T22:23:29 postgresql 0]$ echo exit | grep -e exit\ngrep: illegal option -- e\nUsage: grep [-c|-l|-q] -bhinsvw pattern file . . .\n[nm@gcc-solaris11 5:0 2021-06-30T22:23:43 postgresql 2]$ echo exit | grep exit\nexit\n[nm@gcc-solaris11 5:0 2021-06-30T22:24:16 postgresql 0]$ echo exit | /usr/xpg4/bin/grep -e exit\nexit\n\nThat concealed things in the previous version. 
You can see those \"illegal\noption\" messages in the last passing run:\nhttps://buildfarm.postgresql.org/cgi-bin/show_stage_log.pl?nm=wrasse&dt=2021-06-30%2008%3A57%3A27&stg=make\n\n\n", "msg_date": "Wed, 30 Jun 2021 18:23:28 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 2021-Jun-30, Noah Misch wrote:\n\n> No, and wrasse still succeeds at \"git checkout e45b0df^\". Solaris\n> /usr/bin/grep doesn't support \"-e\":\n\nI think this means the rule should use $(GREP), which is /usr/bin/ggrep\nin wrasse,\n\nchecking for grep that handles long lines and -e... /usr/bin/ggrep\n\n-- \n�lvaro Herrera 39�49'30\"S 73�17'W\n https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 30 Jun 2021 21:44:02 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> On 2021-Jun-30, Noah Misch wrote:\n>> No, and wrasse still succeeds at \"git checkout e45b0df^\". Solaris\n>> /usr/bin/grep doesn't support \"-e\":\n\n> I think this means the rule should use $(GREP), which is /usr/bin/ggrep\n> in wrasse,\n\nAh, my mistake. Although we're still left with the question of why\nSolaris' \"nm\" doesn't support the POSIX-required options.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 23:45:10 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Wed, Jun 30, 2021 at 11:45:10PM -0400, Tom Lane wrote:\n> we're still left with the question of why\n> Solaris' \"nm\" doesn't support the POSIX-required options.\n\nIn POSIX, -g and -u are mutually exclusive. 
Solaris ignores all but the first\nof these in a command:\n\n[nm@gcc-solaris11 5:0 2021-07-01T06:48:54 postgresql 1]$ /usr/bin/nm -u -g src/interfaces/libpq/libpq.so|grep exec\nnm: -u or -e set, -g ignored\n[nm@gcc-solaris11 5:0 2021-07-01T06:49:41 postgresql 1]$ /usr/bin/nm -g -u src/interfaces/libpq/libpq.so|grep exec\nnm: -e or -g set, -u ignored\n[405] | 208320| 84|FUNC |GLOB |3 |14 |PQexec\n[818] | 208416| 128|FUNC |GLOB |3 |14 |PQexecParams\n[729] | 208672| 112|FUNC |GLOB |3 |14 |PQexecPrepared\n[nm@gcc-solaris11 5:0 2021-07-01T06:49:45 postgresql 0]$ /usr/bin/nm -u src/interfaces/libpq/libpq.so|grep exec\n[nm@gcc-solaris11 5:0 2021-07-01T06:49:48 postgresql 1]$ \n\n\n", "msg_date": "Wed, 30 Jun 2021 21:53:09 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Noah Misch <noah@leadboat.com> writes:\n> On Wed, Jun 30, 2021 at 11:45:10PM -0400, Tom Lane wrote:\n>> we're still left with the question of why\n>> Solaris' \"nm\" doesn't support the POSIX-required options.\n\n> In POSIX, -g and -u are mutually exclusive. Solaris ignores all but the first\n> of these in a command:\n\nI've just re-read the POSIX spec for \"nm\", and I do not see anything there\nthat would support that interpretation. Still, we can try it without -g\nand see what else breaks.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 01 Jul 2021 01:20:48 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Thu, Jul 01, 2021 at 01:20:48AM -0400, Tom Lane wrote:\n> Noah Misch <noah@leadboat.com> writes:\n> > On Wed, Jun 30, 2021 at 11:45:10PM -0400, Tom Lane wrote:\n> >> we're still left with the question of why\n> >> Solaris' \"nm\" doesn't support the POSIX-required options.\n> \n> > In POSIX, -g and -u are mutually exclusive. 
Solaris ignores all but the first\n> > of these in a command:\n> \n> I've just re-read the POSIX spec for \"nm\", and I do not see anything there\n> that would support that interpretation. Still, we can try it without -g\n> and see what else breaks.\n\nhttps://pubs.opengroup.org/onlinepubs/9699919799/utilities/nm.html says:\n\n nm [-APv] [-g|-u] [-t format] file...\n\nIf the options weren't mutually-exclusive, it would say:\n\n nm [-APvgu] [-t format] file...\n\n\n", "msg_date": "Wed, 30 Jun 2021 22:23:52 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Noah Misch <noah@leadboat.com> writes:\n> On Thu, Jul 01, 2021 at 01:20:48AM -0400, Tom Lane wrote:\n>> I've just re-read the POSIX spec for \"nm\", and I do not see anything there\n>> that would support that interpretation.\n\n> https://pubs.opengroup.org/onlinepubs/9699919799/utilities/nm.html says:\n> nm [-APv] [-g|-u] [-t format] file...\n\nOh, right, I failed to look carefully at the syntax diagram.\nLocal testing also supports the conclusion that -g isn't needed\nhere, so pushed that way. Thanks for investigating that!\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 01 Jul 2021 10:46:54 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> I think this means the rule should use $(GREP), which is /usr/bin/ggrep\n> in wrasse,\n\nI didn't install this change, because it isn't actually needed at the\nmoment, and we aren't using $(GREP) anywhere else. 
Might be a bridge\nto cross in future.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 01 Jul 2021 10:48:42 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 01.07.21 00:41, Jacob Champion wrote:\n> On Wed, 2021-06-30 at 18:29 -0400, Tom Lane wrote:\n>> I wrote:\n>>> Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n>>>> Could we set this rule up a little bit differently so that it is only\n>>>> run when the library is built.\n>>>> Right now, make world on a built tree makes 17 calls to this \"nm\" line,\n>>>> and make check-world calls it 81 times. I think once would be enough. ;-)\n>>> Hmm, didn't realize that would happen. Will see what can be done.\n>>\n>> Looks like we'd have to make use of a dummy stamp-file, more or less\n>> as attached. Any objections?\n> \n> Spitballing -- if you don't like the stamp file, you could add the\n> check to the end of the $(shlib) rule, surrounded by an ifeq check.\n> Then .DELETE_ON_ERROR should take care of the rest, I think.\n\nSomewhere in the $(shlib) rule would seem most appropriate. But I don't \nunderstand the rest: What ifeq, and why .DELETE_ON_ERROR?\n\n\n", "msg_date": "Thu, 1 Jul 2021 20:05:59 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n> On 01.07.21 00:41, Jacob Champion wrote:\n>> Spitballing -- if you don't like the stamp file, you could add the\n>> check to the end of the $(shlib) rule, surrounded by an ifeq check.\n>> Then .DELETE_ON_ERROR should take care of the rest, I think.\n\n> Somewhere in the $(shlib) rule would seem most appropriate. 
But I don't \n> understand the rest: What ifeq, and why .DELETE_ON_ERROR?\n\nThe variant of this I'd been thinking of was\n\n $(shlib): $(OBJS) | $(SHLIB_PREREQS)\n \t$(LINK.shared) -o $@ $(OBJS) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK)\n+ifneq (,$(SHLIB_EXTRA_ACTION))\n+\t$(SHLIB_EXTRA_ACTION)\n+endif\n\n(and similarly in several other places); then libpq's Makefile\ncould set SHLIB_EXTRA_ACTION to the desired thing.\n\nThe problem then is, what happens when the extra action fails?\nWithout .DELETE_ON_ERROR, the shlib is still there and the next\nmake run will think everything's good.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 01 Jul 2021 14:14:24 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Thu, 2021-07-01 at 14:14 -0400, Tom Lane wrote:\r\n> Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\r\n> > Somewhere in the $(shlib) rule would seem most appropriate. But I don't \r\n> > understand the rest: What ifeq, and why .DELETE_ON_ERROR?\r\n> \r\n> The variant of this I'd been thinking of was\r\n> \r\n> $(shlib): $(OBJS) | $(SHLIB_PREREQS)\r\n> \t$(LINK.shared) -o $@ $(OBJS) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK)\r\n> +ifneq (,$(SHLIB_EXTRA_ACTION))\r\n> +\t$(SHLIB_EXTRA_ACTION)\r\n> +endif\r\n> \r\n> (and similarly in several other places); then libpq's Makefile\r\n> could set SHLIB_EXTRA_ACTION to the desired thing.\r\n> \r\n> The problem then is, what happens when the extra action fails?\r\n> Without .DELETE_ON_ERROR, the shlib is still there and the next\r\n> make run will think everything's good.\r\n\r\nYep, that was pretty much what was in my head. 
ifeq (or ifneq in your\r\nexample) to gate the extra nm check, and .DELETE_ON_ERROR to make the\r\nfailure stick for future make invocations.\r\n\r\n--Jacob\r\n", "msg_date": "Thu, 1 Jul 2021 18:21:04 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 01.07.21 20:14, Tom Lane wrote:\n> The variant of this I'd been thinking of was\n> \n> $(shlib): $(OBJS) | $(SHLIB_PREREQS)\n> \t$(LINK.shared) -o $@ $(OBJS) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK)\n> +ifneq (,$(SHLIB_EXTRA_ACTION))\n> +\t$(SHLIB_EXTRA_ACTION)\n> +endif\n> \n> (and similarly in several other places); then libpq's Makefile\n> could set SHLIB_EXTRA_ACTION to the desired thing.\n\nRight, that looks sensible. (Maybe the ifneq isn't actually necessary, \nsince if the variable is not set, nothing would happen.)\n\n> The problem then is, what happens when the extra action fails?\n> Without .DELETE_ON_ERROR, the shlib is still there and the next\n> make run will think everything's good.\n\nRight. .DELETE_ON_ERROR is already set in Makefile.global, so it's not \nnecessary to set it again.\n\n\n", "msg_date": "Thu, 1 Jul 2021 21:03:58 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n> On 01.07.21 20:14, Tom Lane wrote:\n>> The problem then is, what happens when the extra action fails?\n>> Without .DELETE_ON_ERROR, the shlib is still there and the next\n>> make run will think everything's good.\n\n> Right. .DELETE_ON_ERROR is already set in Makefile.global, so it's not \n> necessary to set it again.\n\nRight. 
Since we use that, we don't actually have that problem.\nWhat we'd have instead is that debugging an unexpected failure\nof the \"extra action\" would be painful, because there would be\nno way short of modifying the Makefiles to create its input data.\nSo I think the other solution with a separate rule is better.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 01 Jul 2021 15:10:05 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Now it's hoverfly:\n\n! nm -A -u libpq.so.5 2>/dev/null | grep -v __cxa_atexit | grep exit\nlibpq.so.5: atexit U -\nlibpq.so.5: pthread_exit U -\n\nhttps://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=hoverfly&dt=2021-07-02%2010%3A10%3A29\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Use it up, wear it out, make it do, or do without\"\n\n\n", "msg_date": "Fri, 2 Jul 2021 10:59:33 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> Now it's hoverfly:\n> ! nm -A -u libpq.so.5 2>/dev/null | grep -v __cxa_atexit | grep exit\n> libpq.so.5: atexit U -\n> libpq.so.5: pthread_exit U -\n\nUgh. What in the world is producing those references?\n\n(As I mentioned upthread, I'm quite suspicious of libpq trying to\nperform any actions in an atexit callback, because of the uncertainty\nabout whether some later atexit callback could try to use libpq\nfunctions. 
So this seems like it might be an actual bug.)\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 02 Jul 2021 11:20:17 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Wed, 2021-06-30 at 10:42 -0400, Tom Lane wrote:\r\n> Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\r\n> > On 2021-Jun-30, Tom Lane wrote:\r\n> > > You mentioned __gcov_exit, but I'm not sure if we need an\r\n> > > exception for that. I see it referenced by the individual .o\r\n> > > files, but the completed .so has no such reference, so at least\r\n> > > on RHEL8 it's apparently satisfied during .so linkage. Do you\r\n> > > see something different?\r\n> > Well, not really. I saw it but only after I removed -fprofile-arcs from\r\n> > Makefile.shlib's link line; but per my other email, that doesn't really\r\n> > work.\r\n> > Everything seems to work well for me after removing abort from that grep.\r\n> \r\n> OK, thanks, will push a fix momentarily.\r\n\r\nWith latest HEAD, building with --enable-coverage still fails on my\r\nUbuntu 20.04:\r\n\r\n ! nm -A -u libpq.so.5.15 2>/dev/null | grep -v __cxa_atexit | grep exit\r\n libpq.so.5.15: U exit@@GLIBC_2.2.5\r\n\r\nI don't see any exit references in the libpq objects or in\r\nlibpgport_shlib, so it seems like libpgcommon_shlib is the culprit... I\r\nassume turning off optimizations leads to less dead code elimination?\r\n\r\n--Jacob\r\n", "msg_date": "Fri, 2 Jul 2021 22:15:45 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Jacob Champion <pchampion@vmware.com> writes:\n> With latest HEAD, building with --enable-coverage still fails on my\n> Ubuntu 20.04:\n\n> ! nm -A -u libpq.so.5.15 2>/dev/null | grep -v __cxa_atexit | grep exit\n> libpq.so.5.15: U exit@@GLIBC_2.2.5\n\nHm, weird. 
I don't see that here on RHEL8, and \nhttps://coverage.postgresql.org seems to be working so it doesn't fail on\nAlvaro's Debian setup either. What configure options are you using?\nDoes \"nm -u\" report \"exit\" being referenced from any *.o in libpq,\nor from any *_shlib.o in src/port/ or src/common/ ?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 02 Jul 2021 18:20:25 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Fri, 2021-07-02 at 18:20 -0400, Tom Lane wrote:\r\n> What configure options are you using?\r\n\r\nJust `./configure --enable-coverage`, nothing else. I distclean'd right\r\nbefore for good measure.\r\n\r\n> Does \"nm -u\" report \"exit\" being referenced from any *.o in libpq,\r\n> or from any *_shlib.o in src/port/ or src/common/ ?\r\n\r\nOnly src/common:\r\n\r\n controldata_utils_shlib.o:\r\n U close\r\n U __errno_location\r\n U exit\r\n ...\r\n fe_memutils_shlib.o:\r\n U exit\r\n ...\r\n file_utils_shlib.o:\r\n U close\r\n U closedir\r\n U __errno_location\r\n U exit\r\n ...\r\n hex_shlib.o:\r\n U exit\r\n ...\r\n psprintf_shlib.o:\r\n U __errno_location\r\n U exit\r\n ...\r\n stringinfo_shlib.o:\r\n U __errno_location\r\n U exit\r\n ...\r\n username_shlib.o:\r\n U __errno_location\r\n U exit\r\n ...\r\n\r\n--Jacob\r\n", "msg_date": "Fri, 2 Jul 2021 22:38:10 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Jacob Champion <pchampion@vmware.com> writes:\n> On Fri, 2021-07-02 at 18:20 -0400, Tom Lane wrote:\n>> What configure options are you using?\n\n> Just `./configure --enable-coverage`, nothing else. I distclean'd right\n> before for good measure.\n\nHmph. There's *something* different about your setup from what\neither Alvaro or I tried. 
What's the compiler (and version)?\nWhat's the platform exactly?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 02 Jul 2021 18:45:21 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Fri, 2021-07-02 at 18:45 -0400, Tom Lane wrote:\r\n> Jacob Champion <pchampion@vmware.com> writes:\r\n> > On Fri, 2021-07-02 at 18:20 -0400, Tom Lane wrote:\r\n> > > What configure options are you using?\r\n> > Just `./configure --enable-coverage`, nothing else. I distclean'd right\r\n> > before for good measure.\r\n> \r\n> Hmph. There's *something* different about your setup from what\r\n> either Alvaro or I tried. What's the compiler (and version)?\r\n> What's the platform exactly?\r\n\r\n $ gcc --version\r\n gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\r\n Copyright (C) 2019 Free Software Foundation, Inc.\r\n ...\r\n\r\n $ cat /etc/os-release\r\n NAME=\"Ubuntu\"\r\n VERSION=\"20.04.2 LTS (Focal Fossa)\"\r\n ID=ubuntu\r\n ID_LIKE=debian\r\n PRETTY_NAME=\"Ubuntu 20.04.2 LTS\"\r\n VERSION_ID=\"20.04\"\r\n ...\r\n\r\n $ uname -a\r\n Linux HOSTNAME 5.8.0-59-generic #66~20.04.1-Ubuntu SMP Thu Jun 17 11:14:10 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux\r\n\r\n--Jacob\r\n", "msg_date": "Fri, 2 Jul 2021 22:51:55 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On 2021-Jul-02, Jacob Champion wrote:\n\n> Only src/common:\n> \n> controldata_utils_shlib.o:\n> U close\n> U __errno_location\n> U exit\n\nActually, I do see these in the .o files as well, but they don't make it\nto the .a file.\n\ngcc here is 8.3.0.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Fri, 2 Jul 2021 19:03:39 -0400", "msg_from": "\"alvherre@alvh.no-ip.org\" <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() 
and exit() calls in libpq" }, { "msg_contents": "\"alvherre@alvh.no-ip.org\" <alvherre@alvh.no-ip.org> writes:\n> gcc here is 8.3.0.\n\nHmmm ... mine is 8.4.1.\n\nI'm about to go out to dinner, but will check into this with some\nnewer gcc versions later.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 02 Jul 2021 19:08:41 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Fri, Jul 02, 2021 at 11:20:17AM -0400, Tom Lane wrote:\n> Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> > Now it's hoverfly:\n> > ! nm -A -u libpq.so.5 2>/dev/null | grep -v __cxa_atexit | grep exit\n> > libpq.so.5: atexit U -\n> > libpq.so.5: pthread_exit U -\n> \n> Ugh. What in the world is producing those references?\n\nThose come from a statically-linked libldap_r:\n\n$ nm -A -u /home/nm/sw/nopath/openldap-64/lib/libldap_r.a|grep exit\n/home/nm/sw/nopath/openldap-64/lib/libldap_r.a[tpool.o]: .ldap_pvt_thread_exit U -\n/home/nm/sw/nopath/openldap-64/lib/libldap_r.a[thr_posix.o]: .pthread_exit U -\n/home/nm/sw/nopath/openldap-64/lib/libldap_r.a[init.o]: .atexit U -\n\n\n", "msg_date": "Fri, 2 Jul 2021 17:16:39 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Noah Misch <noah@leadboat.com> writes:\n> On Fri, Jul 02, 2021 at 11:20:17AM -0400, Tom Lane wrote:\n>> Ugh. What in the world is producing those references?\n\n> Those come from a statically-linked libldap_r:\n\nBlech! 
I wonder if there is some way to avoid counting that.\nIt's not really hard to imagine that such a library might\ncontain an exit() call, for example, thus negating our test\naltogether.\n\nI'm now wondering about applying the test to *.o in libpq,\nas well as libpgport_shlib.a and libpgcommon_shlib.a.\nThe latter would require some code changes, and it would make\nthe prohibition extend further than libpq alone. On the bright\nside, we could reinstate the check for abort().\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 03 Jul 2021 10:10:13 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "I wrote:\n> I'm now wondering about applying the test to *.o in libpq,\n> as well as libpgport_shlib.a and libpgcommon_shlib.a.\n> The latter would require some code changes, and it would make\n> the prohibition extend further than libpq alone. On the bright\n> side, we could reinstate the check for abort().\n\nAfter consuming a bit more caffeine, I'm afraid that won't work.\nI'd imagined leaving, e.g., psprintf.c out of libpgcommon_shlib.a.\nBut if someone mistakenly introduced a psprintf call into libpq,\nit'd still compile just fine; the symbol would be resolved against\npsprintf in the calling application's code. We'd only detect a\nfailure when trying to use libpq with an app that didn't contain\nthat function, which feels like something that our own testing\ncould miss.\n\nWhat I'm now thinking about is restricting the test to only be run on\nplatforms where use of foo.a libraries is deprecated, so that we can\nbe pretty sure that we won't hit this situation. 
Even if we only\nrun the test on Linux, that'd be plenty to catch any mistakes.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 03 Jul 2021 10:45:59 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "I wrote:\n> Hmmm ... mine is 8.4.1.\n> I'm about to go out to dinner, but will check into this with some\n> newer gcc versions later.\n\nTried --enable-coverage on Fedora 34 (with gcc 11.1.1) and sure\nenough there's an exit() call being inserted. I've pushed a fix\nto just disable the check altogether in --enable-coverage builds.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 03 Jul 2021 11:23:56 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Sat, Jul 03, 2021 at 10:45:59AM -0400, Tom Lane wrote:\n> I'd imagined leaving, e.g., psprintf.c out of libpgcommon_shlib.a.\n> But if someone mistakenly introduced a psprintf call into libpq,\n> it'd still compile just fine; the symbol would be resolved against\n> psprintf in the calling application's code.\n\nI think that would fail to compile on Windows, where such references need\nexported symbols. We don't make an exports file for applications other than\npostgres.exe. So the strategy that inspired this may work.\n\n> What I'm now thinking about is restricting the test to only be run on\n> platforms where use of foo.a libraries is deprecated, so that we can\n> be pretty sure that we won't hit this situation. Even if we only\n> run the test on Linux, that'd be plenty to catch any mistakes.\n\nHmm. Static libraries are the rarer case on both AIX and Linux, but I'm not\naware of a relevant deprecation on either platform. If it comes this to, I'd\nbe more inclined to control the Makefile rule with an environment variable\n(e.g. 
ENFORCE_LIBC_CALL_RESTRICTIONS) instead of reacting to the platform.\n\n\n", "msg_date": "Sat, 3 Jul 2021 14:46:58 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Noah Misch <noah@leadboat.com> writes:\n> On Sat, Jul 03, 2021 at 10:45:59AM -0400, Tom Lane wrote:\n>> What I'm now thinking about is restricting the test to only be run on\n>> platforms where use of foo.a libraries is deprecated, so that we can\n>> be pretty sure that we won't hit this situation. Even if we only\n>> run the test on Linux, that'd be plenty to catch any mistakes.\n\n> Hmm. Static libraries are the rarer case on both AIX and Linux, but I'm not\n> aware of a relevant deprecation on either platform. If it comes this to, I'd\n> be more inclined to control the Makefile rule with an environment variable\n> (e.g. ENFORCE_LIBC_CALL_RESTRICTIONS) instead of reacting to the platform.\n\nThat'd require buildfarm owner intervention, as well as intervention\nby users. Which seems like exporting our problems onto them. I'd\nreally rather not go that way if we can avoid it.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 03 Jul 2021 18:44:20 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Sat, Jul 03, 2021 at 06:44:20PM -0400, Tom Lane wrote:\n> Noah Misch <noah@leadboat.com> writes:\n> > On Sat, Jul 03, 2021 at 10:45:59AM -0400, Tom Lane wrote:\n> >> What I'm now thinking about is restricting the test to only be run on\n> >> platforms where use of foo.a libraries is deprecated, so that we can\n> >> be pretty sure that we won't hit this situation. Even if we only\n> >> run the test on Linux, that'd be plenty to catch any mistakes.\n> \n> > Hmm. Static libraries are the rarer case on both AIX and Linux, but I'm not\n> > aware of a relevant deprecation on either platform. 
If it comes this to, I'd\n> > be more inclined to control the Makefile rule with an environment variable\n> > (e.g. ENFORCE_LIBC_CALL_RESTRICTIONS) instead of reacting to the platform.\n> \n> That'd require buildfarm owner intervention, as well as intervention\n> by users. Which seems like exporting our problems onto them. I'd\n> really rather not go that way if we can avoid it.\n\nI like that goal, though we'll have to see how difficult it proves. As of\ntoday, a GNU/Linux user building against static OpenLDAP will get a failure,\nright? That would export work onto that user, spuriously. Since the non-AIX\nuser count dwarfs the AIX user count, expect a user complaint from non-AIX\nfirst.\n\nWe'd get something like 95% of the value by running the test on one Windows\nbuildfarm member and one non-Windows buildfarm member. If you did gate the\ncheck on an environment variable, there would be no need to angle for broad\nadoption. Still, I agree avoiding that configuration step is nice, all else\nbeing equal. A strategy not having either of those drawbacks would be to skip\nthe test if libpq.so contains a definition of libpq_unbind(). If any other\ndependency contains exit calls, we'd likewise probe for one symbol of that\nlibrary and skip the test if presence of that symbol reveals static linking.\n(That's maintenance-prone in its own way, but a maintenance-free strategy has\nnot appeared.)\n\n\n", "msg_date": "Fri, 9 Jul 2021 02:59:52 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "Noah Misch <noah@leadboat.com> writes:\n> On Sat, Jul 03, 2021 at 06:44:20PM -0400, Tom Lane wrote:\n>> That'd require buildfarm owner intervention, as well as intervention\n>> by users. Which seems like exporting our problems onto them. I'd\n>> really rather not go that way if we can avoid it.\n\n> I like that goal, though we'll have to see how difficult it proves. 
As of\n> today, a GNU/Linux user building against static OpenLDAP will get a failure,\n> right? That would export work onto that user, spuriously.\n\nAs a former packager for Red Hat, my response would be \"you're doing it\nwrong\". Nobody on any Linux distro should *ever* statically link code\nfrom one package into code from another, because they are going to create\nuntold pain for themselves when (not if) the first package is updated.\nSo I flat out reject that as a valid use-case.\n\nIt may be that that ethos is not so strongly baked-in on other platforms.\nBut I'm content to wait and see if there are complaints before rescinding\nthe automatic test; and if there are, I'd prefer to deal with it by just\nbacking off to running the test on Linux only.\n\n> We'd get something like 95% of the value by running the test on one Windows\n> buildfarm member and one non-Windows buildfarm member.\n\nTrue. But that just brings up the point that we aren't running the test\nat all on MSVC builds right now. I have no idea how to do that, do you?\n\n> ... 
A strategy not having either of those drawbacks would be to skip\n> the test if libpq.so contains a definition of libpq_unbind().\n\nI assume you meant some OpenLDAP symbol?\n\n> If any other\n> dependency contains exit calls, we'd likewise probe for one symbol of that\n> library and skip the test if presence of that symbol reveals static linking.\n> (That's maintenance-prone in its own way, but a maintenance-free strategy has\n> not appeared.)\n\nI'm more worried about the risk of failing to detect problems at all,\nin case somebody fat-fingers things in a way that causes the test to\nbe skipped everywhere.\n\nI'll keep that way in mind if we conclude that the existing way is\nunworkable, but so far I don't think it is.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 09 Jul 2021 10:06:18 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" }, { "msg_contents": "On Fri, Jul 09, 2021 at 10:06:18AM -0400, Tom Lane wrote:\n> Noah Misch <noah@leadboat.com> writes:\n> > On Sat, Jul 03, 2021 at 06:44:20PM -0400, Tom Lane wrote:\n> >> That'd require buildfarm owner intervention, as well as intervention\n> >> by users. Which seems like exporting our problems onto them. I'd\n> >> really rather not go that way if we can avoid it.\n> \n> > I like that goal, though we'll have to see how difficult it proves. As of\n> > today, a GNU/Linux user building against static OpenLDAP will get a failure,\n> > right? That would export work onto that user, spuriously.\n> \n> As a former packager for Red Hat, my response would be \"you're doing it\n> wrong\". 
Nobody on any Linux distro should *ever* statically link code\n> from one package into code from another, because they are going to create\n> untold pain for themselves when (not if) the first package is updated.\n> So I flat out reject that as a valid use-case.\n> \n> It may be that that ethos is not so strongly baked-in on other platforms.\n\nPackagers do face more rules than users generally.\n\n> But I'm content to wait and see if there are complaints before rescinding\n> the automatic test; and if there are, I'd prefer to deal with it by just\n> backing off to running the test on Linux only.\n\nOkay.\n\n> > We'd get something like 95% of the value by running the test on one Windows\n> > buildfarm member and one non-Windows buildfarm member.\n> \n> True. But that just brings up the point that we aren't running the test\n> at all on MSVC builds right now. I have no idea how to do that, do you?\n\nI don't. But coverage via non-MSVC Windows is good enough.\n\n> > ... A strategy not having either of those drawbacks would be to skip\n> > the test if libpq.so contains a definition of libpq_unbind().\n> \n> I assume you meant some OpenLDAP symbol?\n\nYeah, that was supposed to say ldap_unbind().\n\n\n", "msg_date": "Fri, 9 Jul 2021 13:29:29 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: Preventing abort() and exit() calls in libpq" } ]
[ { "msg_contents": "Hi,\n\nI sometimes have to deal with queries referencing multiple and/or complex\nviews. In such cases, it's quite troublesome to figure out what is the query\nreally executed. Debug_print_rewritten isn't really useful for non trivial\nqueries, and manually doing the view expansion isn't great either.\n\nWhile not being ideal, I wouldn't mind using a custom extension for that but\nthis isn't an option as get_query_def() is private and isn't likely to change.\n\nAs an alternative, maybe we could expose a simple SRF that would take care of\nrewriting the query and deparsing the resulting query tree(s)?\n\nI'm attaching a POC patch for that, adding a new pg_get_query_def(text) SRF.\n\nUsage example:\n\nSELECT pg_get_query_def('SELECT * FROM shoe') as def;\n def\n--------------------------------------------------------\n SELECT shoename, +\n sh_avail, +\n slcolor, +\n slminlen, +\n slminlen_cm, +\n slmaxlen, +\n slmaxlen_cm, +\n slunit +\n FROM ( SELECT sh.shoename, +\n sh.sh_avail, +\n sh.slcolor, +\n sh.slminlen, +\n (sh.slminlen * un.un_fact) AS slminlen_cm,+\n sh.slmaxlen, +\n (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+\n sh.slunit +\n FROM shoe_data sh, +\n unit un +\n WHERE (sh.slunit = un.un_name)) shoe; +\n\n(1 row)", "msg_date": "Sun, 27 Jun 2021 12:11:38 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Deparsing rewritten query" }, { "msg_contents": "ne 27. 6. 2021 v 6:11 odesílatel Julien Rouhaud <rjuju123@gmail.com> napsal:\n\n> Hi,\n>\n> I sometimes have to deal with queries referencing multiple and/or complex\n> views. In such cases, it's quite troublesome to figure out what is the\n> query\n> really executed. 
Debug_print_rewritten isn't really useful for non trivial\n> queries, and manually doing the view expansion isn't great either.\n>\n> While not being ideal, I wouldn't mind using a custom extension for that\n> but\n> this isn't an option as get_query_def() is private and isn't likely to\n> change.\n>\n> As an alternative, maybe we could expose a simple SRF that would take care\n> of\n> rewriting the query and deparsing the resulting query tree(s)?\n>\n> I'm attaching a POC patch for that, adding a new pg_get_query_def(text)\n> SRF.\n>\n> Usage example:\n>\n> SELECT pg_get_query_def('SELECT * FROM shoe') as def;\n> def\n> --------------------------------------------------------\n> SELECT shoename, +\n> sh_avail, +\n> slcolor, +\n> slminlen, +\n> slminlen_cm, +\n> slmaxlen, +\n> slmaxlen_cm, +\n> slunit +\n> FROM ( SELECT sh.shoename, +\n> sh.sh_avail, +\n> sh.slcolor, +\n> sh.slminlen, +\n> (sh.slminlen * un.un_fact) AS slminlen_cm,+\n> sh.slmaxlen, +\n> (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+\n> sh.slunit +\n> FROM shoe_data sh, +\n> unit un +\n> WHERE (sh.slunit = un.un_name)) shoe; +\n>\n> (1 row)\n>\n\n+1\n\nPavel\n\nne 27. 6. 2021 v 6:11 odesílatel Julien Rouhaud <rjuju123@gmail.com> napsal:Hi,\n\r\nI sometimes have to deal with queries referencing multiple and/or complex\r\nviews.  In such cases, it's quite troublesome to figure out what is the query\r\nreally executed.  
Debug_print_rewritten isn't really useful for non trivial\r\nqueries, and manually doing the view expansion isn't great either.\n\r\nWhile not being ideal, I wouldn't mind using a custom extension for that but\r\nthis isn't an option as get_query_def() is private and isn't likely to change.\n\r\nAs an alternative, maybe we could expose a simple SRF that would take care of\r\nrewriting the query and deparsing the resulting query tree(s)?\n\r\nI'm attaching a POC patch for that, adding a new pg_get_query_def(text) SRF.\n\r\nUsage example:\n\r\nSELECT pg_get_query_def('SELECT * FROM shoe') as def;\r\n                          def\r\n--------------------------------------------------------\r\n  SELECT shoename,                                     +\r\n     sh_avail,                                         +\r\n     slcolor,                                          +\r\n     slminlen,                                         +\r\n     slminlen_cm,                                      +\r\n     slmaxlen,                                         +\r\n     slmaxlen_cm,                                      +\r\n     slunit                                            +\r\n    FROM ( SELECT sh.shoename,                         +\r\n             sh.sh_avail,                              +\r\n             sh.slcolor,                               +\r\n             sh.slminlen,                              +\r\n             (sh.slminlen * un.un_fact) AS slminlen_cm,+\r\n             sh.slmaxlen,                              +\r\n             (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+\r\n             sh.slunit                                 +\r\n            FROM shoe_data sh,                         +\r\n             unit un                                   +\r\n           WHERE (sh.slunit = un.un_name)) shoe;       +\n\r\n(1 row)+1Pavel", "msg_date": "Sun, 27 Jun 2021 06:31:34 +0200", "msg_from": "Pavel Stehule <pavel.stehule@gmail.com>", "msg_from_op": false, "msg_subject": 
"Re: Deparsing rewritten query" }, { "msg_contents": "Julien Rouhaud <rjuju123@gmail.com> writes:\n> As an alternative, maybe we could expose a simple SRF that would take care of\n> rewriting the query and deparsing the resulting query tree(s)?\n\nI'm not really excited by this, as it seems like it's exposing internal\ndecisions we could change someday; to wit, first that there is any such\nthing as a separate rewriting pass, and second that its output is\ninterpretable as pure SQL. (TBH, I'm not 100% sure that the second\nassumption is true even today, although I know there are ancient comments\nthat claim that.) It's not very hard to imagine someday moving view\nexpansion into the planner on efficiency grounds, leaving the rewriter\nhandling only the rare uses of INSERT/UPDATE/DELETE rules.\n\nIf there's functions in ruleutils.c that we'd need to make public to\nlet somebody write a debugging extension that does this kind of thing,\nI'd be happier with that approach than with creating a core-server SQL\nfunction for it. There might be more than one use-case for the\nexposed bits.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 27 Jun 2021 10:34:52 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "On Sun, Jun 27, 2021 at 10:34:52AM -0400, Tom Lane wrote:\n> \n> I'm not really excited by this, as it seems like it's exposing internal\n> decisions we could change someday; to wit, first that there is any such\n> thing as a separate rewriting pass\n\nSure, but the fact that views will significantly impact the query being\nexecuted from the one written is not an internal decision. In my opinion\nknowing what the final \"real\" query will be is still a valid concern, whether\nwe have a rewriting pass or not.\n\n> and second that its output is\n> interpretable as pure SQL. 
(TBH, I'm not 100% sure that the second\n> assumption is true even today, although I know there are ancient comments\n> that claim that.)\n\nI totally agree. Note that there was at least one gotcha handled in this\npatch: rewritten views didn't get an alias, which is mandatory for an SQL\nquery.\n\n> It's not very hard to imagine someday moving view\n> expansion into the planner on efficiency grounds, leaving the rewriter\n> handling only the rare uses of INSERT/UPDATE/DELETE rules.\n\nAgreed. One the other hand having such a function in core may ensure that any\nsignificant change in those area will keep an API to retrieve the final query\nrepresentation.\n\n> If there's functions in ruleutils.c that we'd need to make public to\n> let somebody write a debugging extension that does this kind of thing,\n> I'd be happier with that approach than with creating a core-server SQL\n> function for it. There might be more than one use-case for the\n> exposed bits.\n\nIt would mean exposing at least get_query_def(). I thought that exposing this\nfunction was already suggested and refused, but I may be wrong. Maybe other\npeople would like to have nearby functions exposed too.\n\nNote that if we go this way, we would still need at least something like this\npatch's chunk in rewriteHandler.c applied, as otherwise the vast majority of\nrewritten and deparsed queries won't be valid.\n\n\n", "msg_date": "Sun, 27 Jun 2021 23:03:43 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Julien Rouhaud <rjuju123@gmail.com> writes:\n> On Sun, Jun 27, 2021 at 10:34:52AM -0400, Tom Lane wrote:\n>> It's not very hard to imagine someday moving view\n>> expansion into the planner on efficiency grounds, leaving the rewriter\n>> handling only the rare uses of INSERT/UPDATE/DELETE rules.\n\n> Agreed. 
One the other hand having such a function in core may ensure that any\n> significant change in those area will keep an API to retrieve the final query\n> representation.\n\nMy point is precisely that I'm unwilling to make such a promise.\n\nI do not buy that this capability is worth very much, given that\nwe've gotten along fine without it for twenty-plus years. If you\nwant to have it as an internal, might-change-at-any-time API,\nthat seems all right. If you're trying to lock it down as something\nthat will be there forevermore, you're likely to end up with nothing.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 27 Jun 2021 11:14:05 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "On Sun, Jun 27, 2021 at 11:14:05AM -0400, Tom Lane wrote:\n> Julien Rouhaud <rjuju123@gmail.com> writes:\n> \n> > Agreed. One the other hand having such a function in core may ensure that any\n> > significant change in those area will keep an API to retrieve the final query\n> > representation.\n> \n> My point is precisely that I'm unwilling to make such a promise.\n> \n> I do not buy that this capability is worth very much, given that\n> we've gotten along fine without it for twenty-plus years. If you\n> want to have it as an internal, might-change-at-any-time API,\n> that seems all right.\n\nI'm totally fine with a might-change-at-any-time API, but not with a\nmight-disappear-at-anytime API. 
If exposing get_query_def() can become\nvirtually useless in a few releases with no hope to get required in-core change\nfor retrieving the final query representation, I agree this we can stop the\ndiscussion here.\n\n\n", "msg_date": "Sun, 27 Jun 2021 23:21:37 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Le 27/06/2021 à 17:14, Tom Lane a écrit :\n> Julien Rouhaud <rjuju123@gmail.com> writes:\n>> On Sun, Jun 27, 2021 at 10:34:52AM -0400, Tom Lane wrote:\n>>> It's not very hard to imagine someday moving view\n>>> expansion into the planner on efficiency grounds, leaving the rewriter\n>>> handling only the rare uses of INSERT/UPDATE/DELETE rules.\n>> Agreed. One the other hand having such a function in core may ensure that any\n>> significant change in those area will keep an API to retrieve the final query\n>> representation.\n> My point is precisely that I'm unwilling to make such a promise.\n>\n> I do not buy that this capability is worth very much, given that\n> we've gotten along fine without it for twenty-plus years. If you\n> want to have it as an internal, might-change-at-any-time API,\n> that seems all right. If you're trying to lock it down as something\n> that will be there forevermore, you're likely to end up with nothing.\n>\n> \t\t\tregards, tom lane\n\n\nI have to say that such feature would be very helpful for some DBA and \nespecially migration work. The problem is when you have tons of views \nthat call other views in the from or join clauses. These views also call \nother views, etc. I have had instances where there were up to 25 nested \nviews calls. 
When you want to clean up this kind of code, using \nget_query_def () will help save a lot of manual rewrite time and \nheadache to get the final code executed.\n\n\nIf we could at least call get_query_def()through an extension if we \ndidn't have a functionit would be ideal for DBAs.I agree this is unusual \nbut when it does happen to you being able to call get_query_def () helps \na lot.\n\n\n-- \nGilles Darold\nhttp://www.darold.net/\n\n\n\n\n\n\n\nLe 27/06/2021 à 17:14, Tom Lane a\n écrit :\n\n\nJulien Rouhaud <rjuju123@gmail.com> writes:\n\n\nOn Sun, Jun 27, 2021 at 10:34:52AM -0400, Tom Lane wrote:\n\n\nIt's not very hard to imagine someday moving view\nexpansion into the planner on efficiency grounds, leaving the rewriter\nhandling only the rare uses of INSERT/UPDATE/DELETE rules.\n\n\n\n\n\n\nAgreed. One the other hand having such a function in core may ensure that any\nsignificant change in those area will keep an API to retrieve the final query\nrepresentation.\n\n\n\nMy point is precisely that I'm unwilling to make such a promise.\n\nI do not buy that this capability is worth very much, given that\nwe've gotten along fine without it for twenty-plus years. If you\nwant to have it as an internal, might-change-at-any-time API,\nthat seems all right. If you're trying to lock it down as something\nthat will be there forevermore, you're likely to end up with nothing.\n\n\t\t\tregards, tom lane\n\n\n\n\nI\n have to say that such feature would be very helpful for some\n DBA and especially migration work. The\n problem is when you have tons of views that call other views\n in the from or join clauses. These\n views also call other views, etc. I\n have had instances where there were up to 25 nested views\n calls. 
When\n you want to clean up this kind of code, using get_query_def\n () will help save a lot of manual rewrite time and headache\n to get the final code executed.\n\n\n\nIf\n we could at least call get_query_def() through an extension\n if we didn't have a function it would be ideal for DBAs.\nI\n agree this is unusual but when it does happen to you being\n able to call get_query_def () helps a lot. \n\n\n-- \nGilles Darold\nhttp://www.darold.net/", "msg_date": "Mon, 28 Jun 2021 16:06:54 +0200", "msg_from": "Gilles Darold <gilles@darold.net>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Thanks for the feedback Gilles!\n\nOn Mon, Jun 28, 2021 at 04:06:54PM +0200, Gilles Darold wrote:\n> \n> If we could at least call get_query_def()through an extension if we didn't\n> have a functionit would be ideal for DBAs.I agree this is unusual but when\n> it does happen to you being able to call get_query_def () helps a lot.\n\nSince at least 2 other persons seems to be interested in that feature, I can\ntake care of writing and maintaining such an extension, provided that the\nrequired infrastructure is available in core.\n\nPFA v2 of the patch which only adds the required alias and expose\nget_query_def().", "msg_date": "Tue, 29 Jun 2021 00:41:47 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Le 28/06/2021 à 18:41, Julien Rouhaud a écrit :\n> Thanks for the feedback Gilles!\n>\n> On Mon, Jun 28, 2021 at 04:06:54PM +0200, Gilles Darold wrote:\n>> If we could at least call get_query_def()through an extension if we didn't\n>> have a functionit would be ideal for DBAs.I agree this is unusual but when\n>> it does happen to you being able to call get_query_def () helps a lot.\n> Since at least 2 other persons seems to be interested in that feature, I can\n> take care of writing and maintaining such an extension, provided that the\n> 
required infrastructure is available in core.\n>\n> PFA v2 of the patch which only adds the required alias and expose\n> get_query_def().\n\n\nThanks a lot Julien, such facilities are really helpful for DBAs and\nmake the difference with other RDBMS. I don't think that this feature\nexists else where.\n\n\n-- \nGilles Darold\nhttp://www.darold.net/\n\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 19:30:55 +0200", "msg_from": "Gilles Darold <gilles@darold.net>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Hi\n\npo 31. 1. 2022 v 15:37 odesílatel Gilles Darold <gilles@darold.net> napsal:\n\n> Le 28/06/2021 à 18:41, Julien Rouhaud a écrit :\n> > Thanks for the feedback Gilles!\n> >\n> > On Mon, Jun 28, 2021 at 04:06:54PM +0200, Gilles Darold wrote:\n> >> If we could at least call get_query_def()through an extension if we\n> didn't\n> >> have a functionit would be ideal for DBAs.I agree this is unusual but\n> when\n> >> it does happen to you being able to call get_query_def () helps a lot.\n> > Since at least 2 other persons seems to be interested in that feature, I\n> can\n> > take care of writing and maintaining such an extension, provided that the\n> > required infrastructure is available in core.\n> >\n> > PFA v2 of the patch which only adds the required alias and expose\n> > get_query_def().\n>\n\nI checked the last patch. I think it is almost trivial. I miss just\ncomment, why this alias is necessary\n\n+ if (!rte->alias)\n+ rte->alias = makeAlias(get_rel_name(rte->relid), NULL);\n\nRegards\n\nPavel\n\n\n\n>\n> Thanks a lot Julien, such facilities are really helpful for DBAs and\n> make the difference with other RDBMS. I don't think that this feature\n> exists else where.\n>\n>\n> --\n> Gilles Darold\n> http://www.darold.net/\n>\n>\n>\n>\n>\n>\n>\n\nHipo 31. 1. 
2022 v 15:37 odesílatel Gilles Darold <gilles@darold.net> napsal:Le 28/06/2021 à 18:41, Julien Rouhaud a écrit :\n> Thanks for the feedback Gilles!\n>\n> On Mon, Jun 28, 2021 at 04:06:54PM +0200, Gilles Darold wrote:\n>> If we could at least call get_query_def()through an extension if we didn't\n>> have a functionit would be ideal for DBAs.I agree this is unusual but when\n>> it does happen to you being able to call get_query_def () helps a lot.\n> Since at least 2 other persons seems to be interested in that feature, I can\n> take care of writing and maintaining such an extension, provided that the\n> required infrastructure is available in core.\n>\n> PFA v2 of the patch which only adds the required alias and expose\n> get_query_def().I checked the last patch.  I think it is almost trivial. I miss just comment, why this alias is necessary+\tif (!rte->alias)+\t\trte->alias = makeAlias(get_rel_name(rte->relid), NULL);RegardsPavel\n\n\nThanks a lot Julien, such facilities are really helpful for DBAs and\nmake the difference with other RDBMS. I don't think that this feature\nexists else where.\n\n\n-- \nGilles Darold\nhttp://www.darold.net/", "msg_date": "Mon, 31 Jan 2022 18:46:37 +0100", "msg_from": "Pavel Stehule <pavel.stehule@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Hi,\n\nOn Mon, Jan 31, 2022 at 06:46:37PM +0100, Pavel Stehule wrote:\n> \n> I checked the last patch. I think it is almost trivial. I miss just\n> comment, why this alias is necessary\n> \n> + if (!rte->alias)\n> + rte->alias = makeAlias(get_rel_name(rte->relid), NULL);\n\nThanks for looking at it Pavel!\n\nThe alias is necessary because otherwise queries involving views won't produce\nvalid SQL, as aliases for subquery is mandatory. 
This was part of the v1\nregression tests:\n\n+-- test pg_get_query_def()\n+SELECT pg_get_query_def('SELECT * FROM shoe') as def;\n+ def\n+--------------------------------------------------------\n+ SELECT shoename, +\n+ sh_avail, +\n+ slcolor, +\n+ slminlen, +\n+ slminlen_cm, +\n+ slmaxlen, +\n+ slmaxlen_cm, +\n+ slunit +\n+ FROM ( SELECT sh.shoename, +\n+ sh.sh_avail, +\n+ sh.slcolor, +\n+ sh.slminlen, +\n+ (sh.slminlen * un.un_fact) AS slminlen_cm,+\n+ sh.slmaxlen, +\n+ (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+\n+ sh.slunit +\n+ FROM shoe_data sh, +\n+ unit un +\n+ WHERE (sh.slunit = un.un_name)) shoe; +\n\nthe mandatory \"shoe\" alias is added with that change.\n\nI looked for other similar problems and didn't find anything, but given the\ncomplexity of the SQL standard it's quite possible that I missed some other\ncorner case.\n\n\n", "msg_date": "Tue, 1 Feb 2022 02:09:38 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "po 31. 1. 2022 v 19:09 odesílatel Julien Rouhaud <rjuju123@gmail.com>\nnapsal:\n\n> Hi,\n>\n> On Mon, Jan 31, 2022 at 06:46:37PM +0100, Pavel Stehule wrote:\n> >\n> > I checked the last patch. I think it is almost trivial. I miss just\n> > comment, why this alias is necessary\n> >\n> > + if (!rte->alias)\n> > + rte->alias = makeAlias(get_rel_name(rte->relid), NULL);\n>\n> Thanks for looking at it Pavel!\n>\n> The alias is necessary because otherwise queries involving views won't\n> produce\n> valid SQL, as aliases for subquery is mandatory. 
This was part of the v1\n> regression tests:\n>\n> +-- test pg_get_query_def()\n> +SELECT pg_get_query_def('SELECT * FROM shoe') as def;\n> + def\n> +--------------------------------------------------------\n> + SELECT shoename, +\n> + sh_avail, +\n> + slcolor, +\n> + slminlen, +\n> + slminlen_cm, +\n> + slmaxlen, +\n> + slmaxlen_cm, +\n> + slunit +\n> + FROM ( SELECT sh.shoename, +\n> + sh.sh_avail, +\n> + sh.slcolor, +\n> + sh.slminlen, +\n> + (sh.slminlen * un.un_fact) AS slminlen_cm,+\n> + sh.slmaxlen, +\n> + (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+\n> + sh.slunit +\n> + FROM shoe_data sh, +\n> + unit un +\n> + WHERE (sh.slunit = un.un_name)) shoe; +\n>\n> the mandatory \"shoe\" alias is added with that change.\n>\n\n> I looked for other similar problems and didn't find anything, but given the\n> complexity of the SQL standard it's quite possible that I missed some other\n> corner case.\n>\n\nI don't feel good about forcing an alias. relname doesn't ensure\nuniqueness. You can have two views with the same name from different\nschemas. Moreover this field is necessary only when a deparsed query is\nprinted, not always.\n\nIsn't possible to compute the correct subquery alias in print time when it\nis missing?\n\nRegards\n\nPavel\n\npo 31. 1. 2022 v 19:09 odesílatel Julien Rouhaud <rjuju123@gmail.com> napsal:Hi,\n\r\nOn Mon, Jan 31, 2022 at 06:46:37PM +0100, Pavel Stehule wrote:\r\n> \r\n> I checked the last patch.  I think it is almost trivial. I miss just\r\n> comment, why this alias is necessary\r\n> \r\n> + if (!rte->alias)\r\n> + rte->alias = makeAlias(get_rel_name(rte->relid), NULL);\n\r\nThanks for looking at it Pavel!\n\r\nThe alias is necessary because otherwise queries involving views won't produce\r\nvalid SQL, as aliases for subquery is mandatory.  
This was part of the v1\r\nregression tests:\n\r\n+-- test pg_get_query_def()\r\n+SELECT pg_get_query_def('SELECT * FROM shoe') as def;\r\n+                          def\r\n+--------------------------------------------------------\r\n+  SELECT shoename,                                     +\r\n+     sh_avail,                                         +\r\n+     slcolor,                                          +\r\n+     slminlen,                                         +\r\n+     slminlen_cm,                                      +\r\n+     slmaxlen,                                         +\r\n+     slmaxlen_cm,                                      +\r\n+     slunit                                            +\r\n+    FROM ( SELECT sh.shoename,                         +\r\n+             sh.sh_avail,                              +\r\n+             sh.slcolor,                               +\r\n+             sh.slminlen,                              +\r\n+             (sh.slminlen * un.un_fact) AS slminlen_cm,+\r\n+             sh.slmaxlen,                              +\r\n+             (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,+\r\n+             sh.slunit                                 +\r\n+            FROM shoe_data sh,                         +\r\n+             unit un                                   +\r\n+           WHERE (sh.slunit = un.un_name)) shoe;       +\n\r\nthe mandatory \"shoe\" alias is added with that change.\n\r\nI looked for other similar problems and didn't find anything, but given the\r\ncomplexity of the SQL standard it's quite possible that I missed some other\r\ncorner case.I don't feel good about forcing an alias. relname doesn't ensure uniqueness. You can have two views with the same name from different schemas. Moreover this field is necessary only when a deparsed query is printed, not always. 
Isn't possible to compute the correct subquery alias in print time when it is missing?RegardsPavel", "msg_date": "Mon, 31 Jan 2022 22:05:44 +0100", "msg_from": "Pavel Stehule <pavel.stehule@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Hi,\n\nOn Mon, Jan 31, 2022 at 10:05:44PM +0100, Pavel Stehule wrote:\n> \n> I don't feel good about forcing an alias. relname doesn't ensure\n> uniqueness. You can have two views with the same name from different\n> schemas. Moreover this field is necessary only when a deparsed query is\n> printed, not always.\n\nYes I agree.\n\n> Isn't possible to compute the correct subquery alias in print time when it\n> is missing?\n\nActually I think that the current code already does everything to generate\nunique refnames, it's just that they don't get printed for a query after view\nexpansions. I modified the patch to simply make sure that an alias is\ndisplayed when it's a subquery and the output using a custom pg_get_query_def\nis like that:\n\n# select pg_get_query_def('select * from nsp1.v1');\n pg_get_query_def\n-------------------------------\n SELECT nb +\n FROM ( SELECT 1 AS nb) v1;+\n\n(1 row)\n\n\n# select pg_get_query_def('select * from nsp1.v1, nsp2.v1');\n pg_get_query_def\n-------------------------------\n SELECT v1.nb, +\n v1_1.nb +\n FROM ( SELECT 1 AS nb) v1,+\n ( SELECT 1 AS nb) v1_1; +\n\n(1 row)", "msg_date": "Tue, 1 Feb 2022 11:37:07 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "út 1. 2. 2022 v 4:38 odesílatel Julien Rouhaud <rjuju123@gmail.com> napsal:\n\n> Hi,\n>\n> On Mon, Jan 31, 2022 at 10:05:44PM +0100, Pavel Stehule wrote:\n> >\n> > I don't feel good about forcing an alias. relname doesn't ensure\n> > uniqueness. You can have two views with the same name from different\n> > schemas. 
Moreover this field is necessary only when a deparsed query is\n> > printed, not always.\n>\n> Yes I agree.\n>\n> > Isn't possible to compute the correct subquery alias in print time when\n> it\n> > is missing?\n>\n> Actually I think that the current code already does everything to generate\n> unique refnames, it's just that they don't get printed for a query after\n> view\n> expansions. I modified the patch to simply make sure that an alias is\n> displayed when it's a subquery and the output using a custom\n> pg_get_query_def\n> is like that:\n>\n> # select pg_get_query_def('select * from nsp1.v1');\n> pg_get_query_def\n> -------------------------------\n> SELECT nb +\n> FROM ( SELECT 1 AS nb) v1;+\n>\n> (1 row)\n>\n>\n> # select pg_get_query_def('select * from nsp1.v1, nsp2.v1');\n> pg_get_query_def\n> -------------------------------\n> SELECT v1.nb, +\n> v1_1.nb +\n> FROM ( SELECT 1 AS nb) v1,+\n> ( SELECT 1 AS nb) v1_1; +\n>\n> (1 row)\n>\n\nI tested your patch, and it looks so it is working without any problem. All\ntests passed.\n\nThere is just one question. If printalias = true will be active for all\ncases or just with some flag?\n\nI didn't find any visible change of this modification without your\nfunction, so maybe it can be active for all cases without any condition.\n\nRegards\n\nPavel\n\nút 1. 2. 2022 v 4:38 odesílatel Julien Rouhaud <rjuju123@gmail.com> napsal:Hi,\n\nOn Mon, Jan 31, 2022 at 10:05:44PM +0100, Pavel Stehule wrote:\n> \n> I don't feel good about forcing an alias. relname doesn't ensure\n> uniqueness. You can have two views with the same name from different\n> schemas. Moreover this field is necessary only when a deparsed query is\n> printed, not always.\n\nYes I agree.\n\n> Isn't possible to compute the correct subquery alias in print time when it\n> is missing?\n\nActually I think that the current code already does everything to generate\nunique refnames, it's just that they don't get printed for a query after view\nexpansions.  
I modified the patch to simply make sure that an alias is\ndisplayed when it's a subquery and the output using a custom pg_get_query_def\nis like that:\n\n# select  pg_get_query_def('select * from nsp1.v1');\n       pg_get_query_def\n-------------------------------\n  SELECT nb                   +\n    FROM ( SELECT 1 AS nb) v1;+\n\n(1 row)\n\n\n# select  pg_get_query_def('select * from nsp1.v1, nsp2.v1');\n       pg_get_query_def\n-------------------------------\n  SELECT v1.nb,               +\n     v1_1.nb                  +\n    FROM ( SELECT 1 AS nb) v1,+\n     ( SELECT 1 AS nb) v1_1;  +\n\n(1 row)I tested your patch, and it looks so it is working without any problem. All tests passed.There is just one question. If printalias = true will be active for all cases or just with some flag?I didn't find any visible change of this modification without your function, so maybe it can be active for all cases without any condition.RegardsPavel", "msg_date": "Tue, 1 Feb 2022 20:35:00 +0100", "msg_from": "Pavel Stehule <pavel.stehule@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "On Tue, Feb 1, 2022 at 9:08 AM Julien Rouhaud <rjuju123@gmail.com> wrote:\n>\n> Hi,\n\nThanks. +1 for this work. Some comments on v3:\n\n1) How about pg_get_rewritten_query()?\n2) Docs missing?\n3) How about allowing only the users who are explicitly granted to use\nthis function like pg_log_backend_memory_contexts,\npg_log_query_plan(in discussion), pg_log_backtrace(in discussion)?\n4) initStringInfo in the for loop will palloc every time and will leak\nthe memory. 
you probably need to do resetStringInfo in the for loop\ninstead.\n+ foreach(lc, querytree_list)\n+ {\n+ query = (Query *) lfirst(lc);\n+ initStringInfo(&buf);\n5) I would even suggest using a temp memory context for this function\nalone, because it will ensure we dont' leak any memory which probably\nparser, analyzer, rewriter would use.\n6) Why can't query be for loop variable?\n+ Query *query;\n7) Why can't the function check for empty query string and emit error\nimmedeiately (empty string isn't allowed or some other better error\nmessage), rather than depending on the pg_parse_query?\n+ parsetree_list = pg_parse_query(sql);\n+\n+ /* only support one statement at a time */\n+ if (list_length(parsetree_list) != 1)\n+ ereport(ERROR,\n+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),\n+ errmsg(\"a single statement should be provided\")));\n8) Show rewritten query given raw query string\n+{ oid => '9246', descr => 'show a query as rewritten',\n9) Doesn't the input need a ; at the end of query? not sure if the\nparser assumes it as provided?\n+SELECT pg_get_query_def('SELECT * FROM shoe') as def;\n10) For pg_get_viewdef it makes sense to have the test case in\nrules.sql, but shouldn't this function be in misc_functions.sql?\n11) Missing bump cat version note in the commit message.\n12) I'm just thinking adding an extra option to explain, which will\nthen print the rewritten query in the explain output, would be useful\nthan having a separate function to do this?\n13) Somebody might also be interested to just get the completed\nplanned query i.e. output of pg_plan_query? 
or the other way, given\nthe query plan as input to a function, can we get the query back?\nsomething like postgres_fdw/deparse.c does?\n\nRegards,\nBharath Rupireddy.\n\n\n", "msg_date": "Wed, 2 Feb 2022 19:09:35 +0530", "msg_from": "Bharath Rupireddy <bharath.rupireddyforpostgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Hi,\n\nOn Wed, Feb 02, 2022 at 07:09:35PM +0530, Bharath Rupireddy wrote:\n> On Tue, Feb 1, 2022 at 9:08 AM Julien Rouhaud <rjuju123@gmail.com> wrote:\n> >\n> > Hi,\n> \n> Thanks. +1 for this work. Some comments on v3:\n> \n> 1) How about pg_get_rewritten_query()?\n\nArgh, I just realized that I sent the patch from the wrong branch. Per\nprevious complaint from Tom, I'm not proposing that function anymore (I will\npublish an extension for that if the patch gets commits) but only expose\nget_query_def().\n\nI'm attaching the correct patch this time, sorry about that.", "msg_date": "Wed, 2 Feb 2022 22:12:31 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Hi,\n\nOn Tue, Feb 01, 2022 at 08:35:00PM +0100, Pavel Stehule wrote:\n> \n> I tested your patch, and it looks so it is working without any problem. All\n> tests passed.\n> \n> There is just one question. If printalias = true will be active for all\n> cases or just with some flag?\n\nSorry, as I just replied to Bharath I sent the wrong patch. The new patch has\nthe same modification with printalias = true though, so I can still answer that\nquestion. The change is active for all cases, however it's a no-op for any\nin-core case, as a query sent by a client should be valid, and thus should have\nan alias attached to all subqueries. 
It's only different if you call\nget_query_def() on the result of pg_analyze_and_rewrite(), since this code\ndoesn't add the subquery aliases as those aren't needed for the execution part.\n\n\n", "msg_date": "Wed, 2 Feb 2022 22:16:30 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "st 2. 2. 2022 v 15:18 odesílatel Julien Rouhaud <rjuju123@gmail.com> napsal:\n\n> Hi,\n>\n> On Tue, Feb 01, 2022 at 08:35:00PM +0100, Pavel Stehule wrote:\n> >\n> > I tested your patch, and it looks so it is working without any problem.\n> All\n> > tests passed.\n> >\n> > There is just one question. If printalias = true will be active for all\n> > cases or just with some flag?\n>\n> Sorry, as I just replied to Bharath I sent the wrong patch. The new patch\n> has\n> the same modification with printalias = true though, so I can still answer\n> that\n> question. The change is active for all cases, however it's a no-op for any\n> in-core case, as a query sent by a client should be valid, and thus should\n> have\n> an alias attached to all subqueries. It's only different if you call\n> get_query_def() on the result of pg_analyze_and_rewrite(), since this code\n> doesn't add the subquery aliases as those aren't needed for the execution\n> part.\n>\n\nok.\n\nI checked this trivial patch, and I don't see any problem. Again I run\ncheck-world with success. The documentation for this feature is not\nnecessary. But I am not sure about regress tests. Without any other code,\nenfosing printalias will be invisible. What do you think about the\ntransformation of your extension to a new module in src/test/modules? Maybe\nit can be used for other checks in future.\n\nRegards\n\nPavel\n\nst 2. 2. 
2022 v 15:18 odesílatel Julien Rouhaud <rjuju123@gmail.com> napsal:Hi,\n\nOn Tue, Feb 01, 2022 at 08:35:00PM +0100, Pavel Stehule wrote:\n> \n> I tested your patch, and it looks so it is working without any problem. All\n> tests passed.\n> \n> There is just one question. If printalias = true will be active for all\n> cases or just with some flag?\n\nSorry, as I just replied to Bharath I sent the wrong patch.  The new patch has\nthe same modification with printalias = true though, so I can still answer that\nquestion.  The change is active for all cases, however it's a no-op for any\nin-core case, as a query sent by a client should be valid, and thus should have\nan alias attached to all subqueries.  It's only different if you call\nget_query_def() on the result of pg_analyze_and_rewrite(), since this code\ndoesn't add the subquery aliases as those aren't needed for the execution part.ok.I checked this trivial patch, and I don't see any problem. Again I run check-world with success. The documentation for this feature is not necessary. But I am not sure about regress tests. Without any other code, enfosing printalias will be invisible. What do you think about the transformation of your extension to a new module in src/test/modules? Maybe it can be used for other checks in future. RegardsPavel", "msg_date": "Wed, 2 Feb 2022 19:49:41 +0100", "msg_from": "Pavel Stehule <pavel.stehule@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Hi,\n\nOn Wed, Feb 02, 2022 at 07:49:41PM +0100, Pavel Stehule wrote:\n>\n> I checked this trivial patch, and I don't see any problem. Again I run\n> check-world with success. The documentation for this feature is not\n> necessary. But I am not sure about regress tests. Without any other code,\n> enfosing printalias will be invisible. What do you think about the\n> transformation of your extension to a new module in src/test/modules? 
Maybe\n> it can be used for other checks in future.\n\nI'm not opposed, but previously Tom explicitly said that he thinks this feature\nis useless and is strongly opposed to making any kind of promise that the\ncurrent interface to make it possible (if get_query_def() is exposed) would be\nmaintained. Adding such a test module would probably a reason to reject the\npatch altogether. I'm just hoping that this change, which is a no-op for\nany legal query, is acceptable. It can only break something if you feed wrong\ndata to get_query_def(), which would be my problem and not the project's\nproblem.\n\n\n", "msg_date": "Fri, 4 Feb 2022 17:35:25 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "pá 4. 2. 2022 v 10:36 odesílatel Julien Rouhaud <rjuju123@gmail.com> napsal:\n\n> Hi,\n>\n> On Wed, Feb 02, 2022 at 07:49:41PM +0100, Pavel Stehule wrote:\n> >\n> > I checked this trivial patch, and I don't see any problem. Again I run\n> > check-world with success. The documentation for this feature is not\n> > necessary. But I am not sure about regress tests. Without any other code,\n> > enfosing printalias will be invisible. What do you think about the\n> > transformation of your extension to a new module in src/test/modules?\n> Maybe\n> > it can be used for other checks in future.\n>\n> I'm not opposed, but previously Tom explicitly said that he thinks this\n> feature\n> is useless and is strongly opposed to making any kind of promise that the\n> current interface to make it possible (if get_query_def() is exposed)\n> would be\n> maintained. Adding such a test module would probably a reason to reject\n> the\n> patch altogether. I'm just hoping that this change, which is a no-op for\n> any legal query, is acceptable. 
It can only break something if you feed\n> wrong\n> data to get_query_def(), which would be my problem and not the project's\n> problem.\n>\n\nok, I don't have any problem with it. Then there is not necessarily any\nchange, and I'll mark this patch as ready for committer.\n\nRegards\n\nPavel\n\npá 4. 2. 2022 v 10:36 odesílatel Julien Rouhaud <rjuju123@gmail.com> napsal:Hi,\n\nOn Wed, Feb 02, 2022 at 07:49:41PM +0100, Pavel Stehule wrote:\n>\n> I checked this trivial patch, and I don't see any problem. Again I run\n> check-world with success. The documentation for this feature is not\n> necessary. But I am not sure about regress tests. Without any other code,\n> enfosing printalias will be invisible. What do you think about the\n> transformation of your extension to a new module in src/test/modules? Maybe\n> it can be used for other checks in future.\n\nI'm not opposed, but previously Tom explicitly said that he thinks this feature\nis useless and is strongly opposed to making any kind of promise that the\ncurrent interface to make it possible (if get_query_def() is exposed) would be\nmaintained.  Adding such a test module would probably a reason to reject the\npatch altogether.  I'm just hoping that this change, which is a no-op for\nany legal query, is acceptable.  It can only break something if you feed wrong\ndata to get_query_def(), which would be my problem and not the project's\nproblem.ok, I don't have any problem with it. Then there is not necessarily any change, and I'll mark this patch as ready for committer.RegardsPavel", "msg_date": "Fri, 4 Feb 2022 12:45:05 +0100", "msg_from": "Pavel Stehule <pavel.stehule@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "On Fri, Feb 04, 2022 at 12:45:05PM +0100, Pavel Stehule wrote:\n> \n> ok, I don't have any problem with it. 
Then there is not necessarily any\n> change, and I'll mark this patch as ready for committer.\n\nThanks Pavel!\n\nI also realized that the CF entry description wasn't accurate anymore, so I\nalso fixed that.\n\n\n", "msg_date": "Sat, 5 Feb 2022 13:04:40 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "Julien Rouhaud <rjuju123@gmail.com> writes:\n> I'm attaching the correct patch this time, sorry about that.\n\nWhile I'm okay with this in principle, as it stands it fails\nheaderscheck/cpluspluscheck:\n\n$ src/tools/pginclude/headerscheck \nIn file included from /tmp/headerscheck.Oi8jj3/test.c:2:\n./src/include/utils/ruleutils.h:42:35: error: unknown type name 'StringInfo'; did you mean 'String'?\n void get_query_def(Query *query, StringInfo buf, List *parentnamespace,\n ^~~~~~~~~~\n String\n./src/include/utils/ruleutils.h:43:9: error: unknown type name 'TupleDesc'\n TupleDesc resultDesc,\n ^~~~~~~~~\n\nWe could of course add the necessary #include's to ruleutils.h,\nbut considering that we seem to have been at some pains to minimize\nits #include footprint, I'm not really happy with that approach.\nI'm inclined to think that maybe a wrapper function with a slightly\nsimplified interface would be a better way to go. The deparsed string\ncould just be returned as a palloc'd \"char *\", unless you have some reason\nto need it to be in a StringInfo. I wonder which of the other parameters\nreally need to be exposed, too. Several of them seem to be more internal\nto ruleutils.c than something that outside callers would care to\nmanipulate. 
For instance, since struct deparse_namespace isn't exposed,\nthere really isn't any way to pass anything except NIL for\nparentnamespace.\n\nThe bigger picture here is that get_query_def's API has changed over time\ninternally to ruleutils.c, and I kind of expect that that might continue\nin future, so having a wrapper function with a more stable API could be a\ngood thing.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 25 Mar 2022 17:49:04 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "On Fri, Mar 25, 2022 at 05:49:04PM -0400, Tom Lane wrote:\n> Julien Rouhaud <rjuju123@gmail.com> writes:\n> > I'm attaching the correct patch this time, sorry about that.\n> \n> While I'm okay with this in principle, as it stands it fails\n> headerscheck/cpluspluscheck:\n> \n> $ src/tools/pginclude/headerscheck \n> In file included from /tmp/headerscheck.Oi8jj3/test.c:2:\n> ./src/include/utils/ruleutils.h:42:35: error: unknown type name 'StringInfo'; did you mean 'String'?\n> void get_query_def(Query *query, StringInfo buf, List *parentnamespace,\n> ^~~~~~~~~~\n> String\n> ./src/include/utils/ruleutils.h:43:9: error: unknown type name 'TupleDesc'\n> TupleDesc resultDesc,\n> ^~~~~~~~~\n\nAh thanks for the info. I actually never tried headerscheck/cplupluscheck\nbefore.\n\n> We could of course add the necessary #include's to ruleutils.h,\n> but considering that we seem to have been at some pains to minimize\n> its #include footprint, I'm not really happy with that approach.\n> I'm inclined to think that maybe a wrapper function with a slightly\n> simplified interface would be a better way to go. The deparsed string\n> could just be returned as a palloc'd \"char *\", unless you have some reason\n> to need it to be in a StringInfo. I wonder which of the other parameters\n> really need to be exposed, too. 
Several of them seem to be more internal\n> to ruleutils.c than something that outside callers would care to\n> manipulate. For instance, since struct deparse_namespace isn't exposed,\n> there really isn't any way to pass anything except NIL for\n> parentnamespace.\n> \n> The bigger picture here is that get_query_def's API has changed over time\n> internally to ruleutils.c, and I kind of expect that that might continue\n> in future, so having a wrapper function with a more stable API could be a\n> good thing.\n\nFair point. That's a much better approach and goes well with the rest of the\nexposed functions in that file. I went with a pg_get_querydef, getting rid of\nthe StringInfo and the List and using the same \"bool pretty\" flag as used\nelsewhere. While doing so, I saw that there were a lot of copy/pasted code for\nthe pretty flags, so I added a GET_PRETTY_FLAGS(pretty) macro to avoid adding\nyet another occurrence. I also kept the wrapColument and startIdent as they\ncan be easily used by callers.", "msg_date": "Sun, 27 Mar 2022 13:21:09 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Add a pg_get_query_def function (was Re: Deparsing rewritten query)" }, { "msg_contents": "Julien Rouhaud <rjuju123@gmail.com> writes:\n> [ v4-0001-Add-a-pg_get_query_def-wrapper-around-get_query_d.patch ]\n\nThis seems about ready to go to me, except for\n\n(1) as an exported API, pg_get_querydef needs a full API spec in its\nheader comment. \"Read the code to figure out what to do\" is not OK\nin my book.\n\n(2) I don't think this has been thought out too well:\n\n> I also kept the wrapColument and startIdent as they\n> can be easily used by callers.\n\nThe appropriate values for these are determined by macros that are\nlocal in ruleutils.c, so it's not that \"easy\" for outside callers\nto conform to standard practice. 
I suppose we could move\nWRAP_COLUMN_DEFAULT etc into ruleutils.h, but is there actually a\nuse-case for messing with those? I don't see any other exported\nfunctions that go beyond offering a \"bool pretty\" option, so\nI think it might be a mistake for this one to be different.\n(The pattern that I see is that a ruleutils function could have\n\"bool pretty\", or it could have \"int prettyFlags, int startIndent\"\nwhich is an expansion of that; but mixing those levels of detail\ndoesn't seem very wise.)\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 27 Mar 2022 11:53:58 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Add a pg_get_query_def function (was Re: Deparsing rewritten\n query)" }, { "msg_contents": "On Sun, Mar 27, 2022 at 11:53:58AM -0400, Tom Lane wrote:\n> Julien Rouhaud <rjuju123@gmail.com> writes:\n> > [ v4-0001-Add-a-pg_get_query_def-wrapper-around-get_query_d.patch ]\n>\n> This seems about ready to go to me, except for\n>\n> (1) as an exported API, pg_get_querydef needs a full API spec in its\n> header comment. \"Read the code to figure out what to do\" is not OK\n> in my book.\n\nFixed.\n\n> (2) I don't think this has been thought out too well:\n>\n> > I also kept the wrapColument and startIdent as they\n> > can be easily used by callers.\n>\n> The appropriate values for these are determined by macros that are\n> local in ruleutils.c, so it's not that \"easy\" for outside callers\n> to conform to standard practice. 
I suppose we could move\n> WRAP_COLUMN_DEFAULT etc into ruleutils.h, but is there actually a\n> use-case for messing with those?\n\nAs far as I can see the wrapColumn and startIndent are independant of the\npretty flags, and don't really have magic numbers for those\n(WRAP_COLUMN_DEFAULT sure exists, but the value isn't really surprising).\n\n> I don't see any other exported\n> functions that go beyond offering a \"bool pretty\" option, so\n> I think it might be a mistake for this one to be different.\n\nThere's the SQL function pg_get_viewdef_wrap() that accept a custom wrapColumn.\n\nThat being said I'm totally ok with just exposing a \"pretty\" parameter and use\nWRAP_COLUMN_DEFAULT. In any case I agree that exposing startIndent doesn't\nserve any purpose.\n\nI'm attaching a v5 with hopefully a better comment for the function, and only\nthe \"pretty\" parameter.", "msg_date": "Mon, 28 Mar 2022 11:12:38 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Add a pg_get_query_def function (was Re: Deparsing rewritten\n query)" }, { "msg_contents": "Julien Rouhaud <rjuju123@gmail.com> writes:\n> I'm attaching a v5 with hopefully a better comment for the function, and only\n> the \"pretty\" parameter.\n\nPushed with some minor cosmetic adjustments.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Mar 2022 11:20:42 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Add a pg_get_query_def function (was Re: Deparsing rewritten\n query)" }, { "msg_contents": "On Mon, Mar 28, 2022 at 11:20:42AM -0400, Tom Lane wrote:\n> Julien Rouhaud <rjuju123@gmail.com> writes:\n> > I'm attaching a v5 with hopefully a better comment for the function, and only\n> > the \"pretty\" parameter.\n> \n> Pushed with some minor cosmetic adjustments.\n\nThanks a lot!\n\nI just published an extension using this for the use case I'm interested in.\n\n\n", "msg_date": "Mon, 28 Mar 2022 23:48:21 +0800", 
"msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Add a pg_get_query_def function (was Re: Deparsing rewritten\n query)" } ]
[ { "msg_contents": "Hello all,\n\n\nI'm moving to another company from July 1st. I may possibly not be allowed to do open source activities there, so let me say goodbye once. Thank you all for your help. I really enjoyed learning PostgreSQL and participating in its development.\n\nIt's a pity that I may not be able to part of PostgreSQL's great history until it becomes the most popular database (in the DB-Engines ranking.) However, if possible, I'd like to come back as just MauMau.\n\nI hope you all will succeed.\n\n\nRegards\nTakayuki Tsunakawa\n\n\n\n", "msg_date": "Sun, 27 Jun 2021 07:41:19 +0000", "msg_from": "\"tsunakawa.takay@fujitsu.com\" <tsunakawa.takay@fujitsu.com>", "msg_from_op": true, "msg_subject": "Farewell greeting" }, { "msg_contents": "> Hello all,\n> \n> \n> I'm moving to another company from July 1st. I may possibly not be allowed to do open source activities there, so let me say goodbye once. Thank you all for your help. I really enjoyed learning PostgreSQL and participating in its development.\n> \n> It's a pity that I may not be able to part of PostgreSQL's great history until it becomes the most popular database (in the DB-Engines ranking.) However, if possible, I'd like to come back as just MauMau.\n> \n> I hope you all will succeed.\n> \n> \n> Regards\n> Takayuki Tsunakawa\n\nGood luck, Tsunakawa-san!\n--\nTatsuo Ishii\nSRA OSS, Inc. Japan\nEnglish: http://www.sraoss.co.jp/index_en.php\nJapanese:http://www.sraoss.co.jp\n\n\n", "msg_date": "Sun, 27 Jun 2021 16:48:45 +0900 (JST)", "msg_from": "Tatsuo Ishii <ishii@sraoss.co.jp>", "msg_from_op": false, "msg_subject": "Re: Farewell greeting" }, { "msg_contents": "On Sun, Jun 27, 2021 at 07:41:19AM +0000, tsunakawa.takay@fujitsu.com wrote:\n> \n> I'm moving to another company from July 1st. I may possibly not be allowed\n> to do open source activities there, so let me say goodbye once. Thank you\n> all for your help. 
I really enjoyed learning PostgreSQL and participating in\n> its development.\n> \n> It's a pity that I may not be able to part of PostgreSQL's great history\n> until it becomes the most popular database (in the DB-Engines ranking.)\n> However, if possible, I'd like to come back as just MauMau.\n> \n> I hope you all will succeed.\n\nWish you all the best!\n\n\n", "msg_date": "Sun, 27 Jun 2021 16:40:25 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Farewell greeting" }, { "msg_contents": "On Sun, Jun 27, 2021 at 12:41 AM tsunakawa.takay@fujitsu.com\n<tsunakawa.takay@fujitsu.com> wrote:\n> I'm moving to another company from July 1st. I may possibly not be allowed to do open source activities there, so let me say goodbye once.\n\nIt's a pity that you may not be around anymore. I wish you all the best.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Sun, 27 Jun 2021 09:51:28 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: Farewell greeting" }, { "msg_contents": ">I'm moving to another company from July 1st. I may possibly not be allowed to do open source activities there, so let me say >goodbye once. Thank you all for your help. I really enjoyed learning PostgreSQL and participating in its development.\n\nThank you for everything, Tsunakawa-san.\nGood luck !!!\n\nRegards\nSho Kato\n-----Original Message-----\nFrom: tsunakawa.takay@fujitsu.com <tsunakawa.takay@fujitsu.com> \nSent: Sunday, June 27, 2021 4:41 PM\nTo: pgsql-hackers@lists.postgresql.org\nCc: MauMau <maumau307@gmail.com>\nSubject: Farewell greeting\n\nHello all,\n\n\nI'm moving to another company from July 1st. I may possibly not be allowed to do open source activities there, so let me say goodbye once. Thank you all for your help. 
I really enjoyed learning PostgreSQL and participating in its development.\n\nIt's a pity that I may not be able to part of PostgreSQL's great history until it becomes the most popular database (in the DB-Engines ranking.) However, if possible, I'd like to come back as just MauMau.\n\nI hope you all will succeed.\n\n\nRegards\nTakayuki Tsunakawa\n\n\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 01:47:35 +0000", "msg_from": "\"kato-sho@fujitsu.com\" <kato-sho@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Farewell greeting" }, { "msg_contents": "On Sun, Jun 27, 2021 at 07:41:19AM +0000, tsunakawa.takay@fujitsu.com wrote:\n> It's a pity that I may not be able to part of PostgreSQL's great\n> history until it becomes the most popular database (in the\n> DB-Engines ranking.) However, if possible, I'd like to come back as\n> just MauMau.\n\nThis is an open community. So there would be nothing preventing you\nto come back. Good luck on your next position and all the best.\n--\nMichael", "msg_date": "Mon, 28 Jun 2021 11:22:45 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Farewell greeting" }, { "msg_contents": "Hi Tsunakawa-san,\n\nOn 2021/06/27 16:41, tsunakawa.takay@fujitsu.com wrote:\n> I'm moving to another company from July 1st. I may possibly not be allowed to do open source activities there, so let me say goodbye once. Thank you all for your help. I really enjoyed learning PostgreSQL and participating in its development.\n> \n> It's a pity that I may not be able to part of PostgreSQL's great history until it becomes the most popular database (in the DB-Engines ranking.) However, if possible, I'd like to come back as just MauMau.\n> \n> I hope you all will succeed.\n\n\nGood luck in your future career! 
:-D\nSee you at the Japanese PG developers' meetup.\n\n\nThanks,\nTatsuro Yamada\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 11:53:26 +0900", "msg_from": "Tatsuro Yamada <tatsuro.yamada.tf@nttcom.co.jp>", "msg_from_op": false, "msg_subject": "Re: Farewell greeting" } ]
[ { "msg_contents": "> Hi,\n>\n> I sometimes have to deal with queries referencing multiple and/or complex\n> views. In such cases, it's quite troublesome to figure out what is the\n> query\n> really executed. Debug_print_rewritten isn't really useful for non trivial\n> queries, and manually doing the view expansion isn't great either.\n>\n> While not being ideal, I wouldn't mind using a custom extension for that\n> but\n> this isn't an option as get_query_def() is private and isn't likely to\n> change.\n>\n> As an alternative, maybe we could expose a simple SRF that would take care\n> of\n> rewriting the query and deparsing the resulting query tree(s)?\n>\n> I'm attaching a POC patch for that, adding a new pg_get_query_def(text)\n> SRF.\n+1\n\nIf you don't mind, I made small corrections to your patch.\n1. strcmp(sql, \"\") could not be replaced by sql[0] == '\\0'?\n2. the error message would not be: \"empty statement not allowed\"?\n3. initStringInfo(&buf) inside a loop, wouldn't it be exaggerated? each\ntime call palloc0.\n\nregards,\nRanier Vilela", "msg_date": "Sun, 27 Jun 2021 10:06:00 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Deparsing rewritten query" }, { "msg_contents": "On Sun, Jun 27, 2021 at 10:06:00AM -0300, Ranier Vilela wrote:\n> \n> 1. strcmp(sql, \"\") could not be replaced by sql[0] == '\\0'?\n\nIt's a debugging leftover that I forgot to remove. There's no point trying\nto catch an empty string as e.g. a single space would be exactly the same, and\nboth would be caught by the next (and correct) test.\n\n> 3. initStringInfo(&buf) inside a loop, wouldn't it be exaggerated? each\n> time call palloc0.\n\ninitStringInfo calls palloc, not palloc0.\n\nIt's unlikely to make any difference. Rules have been strongly discouraged for\nmany years, and if you have enough to make a noticeable difference here, you\nprobably have bigger problems. 
But no objection to reset the StringInfo\ninstead.\n\n\n", "msg_date": "Sun, 27 Jun 2021 21:35:35 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Deparsing rewritten query" } ]
[ { "msg_contents": "\nI am planning on forking the tree so we can start adding developments\nfor Postgres 15 in the upcoming commitfest. This will be done tomorrow,\nJune 28, late morning US East coast time. I will be following the\nprocedures laid out in src/tools/RELEASE_CHANGES under the heading\n\"Starting a New Development Cycle\".\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Sun, 27 Jun 2021 10:55:58 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": true, "msg_subject": "code fork June 28th" }, { "msg_contents": "\nOn 6/27/21 10:55 AM, Andrew Dunstan wrote:\n> I am planning on forking the tree so we can start adding developments\n> for Postgres 15 in the upcoming commitfest. This will be done tomorrow,\n> June 28, late morning US East coast time. I will be following the\n> procedures laid out in src/tools/RELEASE_CHANGES under the heading\n> \"Starting a New Development Cycle\".\n>\n>\n\n\nThis has been completed.\n\n\nLet the hacking begin ...\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 11:43:01 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": true, "msg_subject": "Re: code fork June 28th" }, { "msg_contents": "Andrew Dunstan <andrew@dunslane.net> writes:\n> This has been completed.\n\nDoesn't look like the buildfarm has heard of REL_14_STABLE yet?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 13:04:56 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: code fork June 28th" }, { "msg_contents": "\nOn 6/28/21 1:04 PM, Tom Lane wrote:\n> Andrew Dunstan <andrew@dunslane.net> writes:\n>> This has been completed.\n> Doesn't look like the buildfarm has heard of REL_14_STABLE yet?\n>\n> \t\t\t\n\n\n\nWill do shortly. 
On at least one occasion we've been caught by updating\nit before the new branch had reached mirrors etc, so I usually wait a bit.\n\n\ncheers\n\n\nandrew\n\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 13:09:11 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": true, "msg_subject": "Re: code fork June 28th" } ]
[ { "msg_contents": "moonjelly just reported an interesting failure [1]. It seems that\nwith the latest bleeding-edge gcc, this code is misoptimized:\n\n /* check random range */\n if (imin > imax)\n {\n pg_log_error(\"empty range given to random\");\n return false;\n }\n else if (imax - imin < 0 || (imax - imin) + 1 < 0)\n {\n /* prevent int overflows in random functions */\n pg_log_error(\"random range is too large\");\n return false;\n }\n\nsuch that the second if-test doesn't fire. Now, according to the C99\nspec this code is broken, because the compiler is allowed to assume\nthat signed integer overflow doesn't happen, whereupon the second\nif-block is provably unreachable. The failure still represents a gcc\nbug, because we're using -fwrapv which should disable that assumption.\nHowever, not all compilers have that switch, so it'd be better to code\nthis in a spec-compliant way. I suggest applying the attached in\nbranches that have the required functions.\n\n[1] https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=moonjelly&dt=2021-06-26%2007%3A03%3A17\n\n\t\t\tregards, tom lane", "msg_date": "Sun, 27 Jun 2021 13:39:03 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Overflow hazard in pgbench" }, { "msg_contents": "I wrote:\n> ... according to the C99\n> spec this code is broken, because the compiler is allowed to assume\n> that signed integer overflow doesn't happen, whereupon the second\n> if-block is provably unreachable. The failure still represents a gcc\n> bug, because we're using -fwrapv which should disable that assumption.\n> However, not all compilers have that switch, so it'd be better to code\n> this in a spec-compliant way.\n\nBTW, for grins I tried building today's HEAD without -fwrapv, using\n\tgcc version 11.1.1 20210531 (Red Hat 11.1.1-3) (GCC) \nwhich is the newest version I have at hand. Not very surprisingly,\nthat reproduced the failure shown on moonjelly. 
However, after adding\nthe patch I proposed, \"make check-world\" passed! I was not expecting\nthat result; I supposed we still had lots of lurking assumptions of\ntraditional C overflow handling.\n\nI'm not in any hurry to remove -fwrapv, because (a) this result doesn't\nshow that we have no such assumptions, only that they must be lurking\nin darker, poorly-tested corners, and (b) I'm not aware of any reason\nto think that removing -fwrapv would provide benefits worth taking any\nrisks for. But we may be closer to being able to do without that\nswitch than I thought.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 27 Jun 2021 16:21:46 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Overflow hazard in pgbench" }, { "msg_contents": "Hello Tom,\n\n> moonjelly just reported an interesting failure [1].\n\nI noticed. I was planning to have a look at it, thanks for digging!\n\n> It seems that with the latest bleeding-edge gcc, this code is \n> misoptimized:\n\n> else if (imax - imin < 0 || (imax - imin) + 1 < 0)\n> {\n> /* prevent int overflows in random functions */\n> pg_log_error(\"random range is too large\");\n> return false;\n> }\n>\n> such that the second if-test doesn't fire. Now, according to the C99\n> spec this code is broken, because the compiler is allowed to assume\n> that signed integer overflow doesn't happen,\n\nHmmm, so it is not possible to detect these with standard arithmetic as \ndone by this code. Note that the code was written in 2016, ISTM pre C99 \nPostgres. 
I'm unsure about what a C compiler could assume on the previous \nstandard wrt integer arithmetic.\n\n> whereupon the second if-block is provably unreachable.\n\nIndeed.\n\n> The failure still represents a gcc bug, because we're using -fwrapv \n> which should disable that assumption.\n\nOk, I'll report it.\n\nI also see a good point with pgbench tests exercising edge cases.\n\n> However, not all compilers have that switch, so it'd be better to code\n> this in a spec-compliant way.\n\nOk.\n\n> I suggest applying the attached in branches that have the required \n> functions.\n\nThe latest gcc, recompiled yesterday, is still wrong, as shown by \nmoonjelly current status.\n\nThe proposed patch does fix the issue in pgbench TAP test. I'd suggest to \nadd unlikely() on all these conditions, as already done elsewhere. See \nattached version.\n\nI confirm that check-world passed with gcc head and its broken -fwrapv.\n\nI also recompiled after removing manually -fwrapv: Make check, pgbench TAP \ntests and check-world all passed. I'm not sure that edge case are well \nenough tested in postgres to be sure that all is ok just from these runs \nthough.\n\n-- \nFabien.", "msg_date": "Mon, 28 Jun 2021 10:13:11 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: Overflow hazard in pgbench" }, { "msg_contents": "Fabien COELHO <coelho@cri.ensmp.fr> writes:\n>> I suggest applying the attached in branches that have the required \n>> functions.\n\n> The proposed patch does fix the issue in pgbench TAP test. I'd suggest to \n> add unlikely() on all these conditions, as already done elsewhere. See \n> attached version.\n\nDone that way, though I'm skeptical that it makes any measurable\ndifference.\n\n> I also recompiled after removing manually -fwrapv: Make check, pgbench TAP \n> tests and check-world all passed. 
I'm not sure that edge case are well \n> enough tested in postgres to be sure that all is ok just from these runs \n> though.\n\nYeah, I'm afraid that in most places it'd take a specifically-designed\ntest case to expose a problem, if there is one.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 12:43:30 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Overflow hazard in pgbench" }, { "msg_contents": "\n>> The failure still represents a gcc bug, because we're using -fwrapv which \n>> should disable that assumption.\n>\n> Ok, I'll report it.\n\nDone at https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101254\n\n-- \nFabien.\n\n\n", "msg_date": "Tue, 29 Jun 2021 11:47:11 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: Overflow hazard in pgbench" }, { "msg_contents": "Hello Tom,\n\n>>> The failure still represents a gcc bug, because we're using -fwrapv which \n>>> should disable that assumption.\n>> \n>> Ok, I'll report it.\n>\n> Done at https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101254\n\nFixed at r12-1916-ga96d8d67d0073a7031c0712bc3fb7759417b2125\n\nhttps://gcc.gnu.org/git/gitweb.cgi?p=gcc.git;h=a96d8d67d0073a7031c0712bc3fb7759417b2125\n\nJust under 10 hours from the bug report…\n\n-- \nFabien.", "msg_date": "Tue, 29 Jun 2021 22:52:16 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: Overflow hazard in pgbench" }, { "msg_contents": "Hi,\n\nOn 2021-06-27 16:21:46 -0400, Tom Lane wrote:\n> BTW, for grins I tried building today's HEAD without -fwrapv, using\n> \tgcc version 11.1.1 20210531 (Red Hat 11.1.1-3) (GCC)\n> which is the newest version I have at hand. Not very surprisingly,\n> that reproduced the failure shown on moonjelly. However, after adding\n> the patch I proposed, \"make check-world\" passed! 
I was not expecting\n> that result; I supposed we still had lots of lurking assumptions of\n> traditional C overflow handling.\n\nWe did fix a lot of them a few years back...\n\n\n> I'm not in any hurry to remove -fwrapv, because (a) this result doesn't\n> show that we have no such assumptions, only that they must be lurking\n> in darker, poorly-tested corners, and (b) I'm not aware of any reason\n> to think that removing -fwrapv would provide benefits worth taking any\n> risks for. But we may be closer to being able to do without that\n> switch than I thought.\n\nLack of failures after removing frwapv itself doesn't prove that much -\nvery commonly the compiler won't optimize based on the improved\nknowledge about value range. Additionally we probably don't exercise all\neffected places in our tests.\n\nubsan is able to catch all signed overflows. The last time I played\naround with that, tests still were hitting quite a few cases of\noverflows. But most not in particularly interesting places\n(e.g. cash_out, RIGHTMOST_ONE()) but also a few where it might be worth\nbeing careful about it in case a compiler disregards -fwrapv or doesn't\nimplement it (e.g. _dorand48()).\n\nIt might be worth setting up a bf animal with ubsan and enabled overflow\nchecking...\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Thu, 8 Jul 2021 19:10:21 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: Overflow hazard in pgbench" } ]
[ { "msg_contents": "Hi,\n\nestimating joins is one of the significant gaps related to extended\nstatistics, and I've been regularly asked about what we might do about\nthat. This is an early experimental patch that I think might help us\nwith improving this, possible even in PG15.\n\nNote: I do not claim this is exactly how it should be implemented, but\nit's probably sufficient to demonstrate the pros/cons of various\nalternative approaches, etc.\n\nIn short, the patch samples the tables and uses those samples to\nestimate selectivity for scans and joins. The samples are collected\nduring planning, which may be quite expensive - random I/O for each\nquery, etc. It'd be possible to build them during analyze, but that'd\nrequire solving serialization, tweak CREATE STATISTICS to handle join\nqueries, etc. I decided to keep the PoC simple.\n\nIt still uses CREATE STATISTICS with a new \"sample\" kind, instructing\nthe optimizer to use sampling when estimating clauses on the attributes.\n\nA little example demonstrating what the patch does:\n\n create table t (a int, b int, c int);\n\n insert into t select mod(i,10), mod(i,20), mod(i,40)\n from generate_series(1,10000000) s(i);\n\n analyze t;\n\n -- estimate without any statistics / sampling\n explain analyze select * from t where a = 0 and b = 0 and c = 0;\n\n QUERY PLAN\n -------------------------------------------------------------------\n Seq Scan on t (cost=0.00..229055.00 rows=1361 width=12)\n (actual time=0.025..761.571 rows=250000 loops=1)\n Filter: ((a = 0) AND (b = 0) AND (c = 0))\n Rows Removed by Filter: 9750000\n Planning Time: 0.471 ms\n Execution Time: 901.182 ms\n (5 rows)\n\n -- enable sampling on those columns\n create statistics s (sample) on a, b, c from t;\n\n explain analyze select * from t where a = 0 and b = 0 and c = 0;\n\n QUERY PLAN\n -------------------------------------------------------------------\n Seq Scan on t (cost=0.00..229055.00 rows=250390 width=12)\n (actual time=0.307..717.937 
rows=250000 loops=1)\n Filter: ((a = 0) AND (b = 0) AND (c = 0))\n Rows Removed by Filter: 9750000\n Planning Time: 194.528 ms\n Execution Time: 851.832 ms\n (5 rows)\n\nOf course, in this case a MCV would work well too, because there are\nvery few combinations in (a,b,c) - a sample would work even when that's\nnot the case, and it has various other benefits (can estimate almost any\nexpression while MCV supports only a subset, etc.)\n\nNow, let's look at a join between a fact and a dimension table:\n\n create table f (d1 int, d2 int, f1 int, f2 int, f3 int);\n\n create table d (d1 int, d2 int, d3 int, d4 int, d5 int,\n primary key (d1, d2));\n\n insert into d select i, i, mod(i,100), mod(i,100), mod(i,100)\n from generate_series(0,999) s(i);\n\n insert into f select mod(i,1000), mod(i,1000), mod(i,100), mod(i,100),\n mod(i,100) from generate_series(1,1000000) s(i);\n\n analyze f, d;\n\n explain analyze select * from f join d using (d1,d2)\n where f1 < 50 and f2 < 50 and d3 < 50 and d4 < 50;\n\n QUERY PLAN\n ----------------------------------------------------------------------\n Hash Join (cost=25.75..22717.01 rows=63 width=32)\n (actual time=3.197..861.899 rows=500000 loops=1)\n Hash Cond: ((f.d1 = d.d1) AND (f.d2 = d.d2))\n -> Seq Scan on f (cost=0.00..21370.00 rows=251669 width=20)\n (actual time=0.033..315.401 rows=500000 loops=1)\n Filter: ((f1 < 50) AND (f2 < 50))\n Rows Removed by Filter: 500000\n -> Hash (cost=22.00..22.00 rows=250 width=20)\n (actual time=3.139..3.141 rows=500 loops=1)\n Buckets: 1024 Batches: 1 Memory Usage: 34kB\n -> Seq Scan on d (cost=0.00..22.00 rows=250 width=20)\n (actual time=0.018..1.706 rows=500 loops=1)\n Filter: ((d3 < 50) AND (d4 < 50))\n Rows Removed by Filter: 500\n Planning Time: 0.806 ms\n Execution Time: 1099.229 ms\n (12 rows)\n\nSo, not great - underestimated by 10000x is likely to lead to\ninefficient plans. 
And now with the samples enabled on both sides:\n\n create statistics s1 (sample) on d1, d2, f1, f2, f3 from f;\n create statistics s2 (sample) on d1, d2, d3, d4, d5 from d;\n\n QUERY PLAN\n ----------------------------------------------------------------------\n Hash Join (cost=29.50..24057.25 rows=503170 width=32)\n (actual time=0.630..837.483 rows=500000 loops=1)\n Hash Cond: ((f.d1 = d.d1) AND (f.d2 = d.d2))\n -> Seq Scan on f (cost=0.00..21370.00 rows=503879 width=20)\n (actual time=0.008..301.584 rows=500000 loops=1)\n Filter: ((f1 < 50) AND (f2 < 50))\n Rows Removed by Filter: 500000\n -> Hash (cost=22.00..22.00 rows=500 width=20)\n (actual time=0.616..0.618 rows=500 loops=1)\n Buckets: 1024 Batches: 1 Memory Usage: 34kB\n -> Seq Scan on d (cost=0.00..22.00 rows=500 width=20)\n (actual time=0.004..0.321 rows=500 loops=1)\n Filter: ((d3 < 50) AND (d4 < 50))\n Rows Removed by Filter: 500\n Planning Time: 603.442 ms\n Execution Time: 1071.735 ms\n (12 rows)\n\nYes, it takes 600ms to do the sampling, but I'm sure most of this can be\neliminated by optimizing the code and/or storing the samples just like\nother types of stats.\n\nNote that most of the 1000x underestimate is not due to poor estimates\nat the scan level, but mostly due to the join condition having two\ncorrelated clauses. Yes, adding a proper foreign key would probably\nimprove this (we already leverage this information in planning), but\nthere can be cross-table correlations between the other conditions, and\nthe FK can't help with that. Correlations between different dimension\ntables are quite common, and sampling can help with those.\n\nNote: There's another PoC patch using multi-column MCVs to improve join\nestimated - that has the same limitations as MCVs for scans. 
It works\nquite fine (only) when the MCV represents large part of the data, and it\ndoes not support evaluating arbitrary expressions.\n\n\nNow, a little bit about the implementation, sampling limitations etc.\n\nAt the scan level, sampling is fairly straightforward - the patch simply\nruns a TABLESAMPLE query through SPI, with a sample fraction calculated\nfrom a GUC (estimate_sample_rate, 1% by default) and statistics target.\nThe samples may be too large and the calculation may need some changes,\nbut that's a minor detail I think. Not sure SPI is the right way to do\nthis, but for PoC it's good enough.\n\nFor joins, sampling is way more complicated - we can't sample both\ntables randomly, because that'd require huge samples on both sides - as\nshown in [3], sampling n rows from a join with table having N rows\nrequires sqrt(n * N) from the table. Which is a lot.\n\nSo what this patch attempts to do is \"correlated sampling\", described in\n[1] and [3]. Imagine a join on a foreign key, as in the example query.\n(The patch only looks for a PK, for simplicity.)\n\nThis is a pretty common pattern, especially in star and snowflake\nqueries, which join a \"fact\" table to one or more \"dimension\" tables.\n\nThe \"correlated\" sampling means the \"fact\" table (side of the join\nwithout the PK) is sampled randomly, but the dimensions are simply\nscanned for matching rows. The PK means there can only be one matching\nrow for each sample one, so we're \"enriching\" the random sample.\n\nThis is what [1] describes as CS2, including how to extend the approach\nto joins without the PK/FK requirement and various corner cases, and [3]\nimproves that to leverage indexes. 
[4] discussed various CS2 variations,\naddressing various problems - reducing space requirements, etc.\n\nThe current PoC patch is however very simplistic and naive - for example\nit does not attempt to correlate joins with multiple dimensions, so for\nexample when joining F with D1 and then D2, we sample (F,D1) and then\n(F,D2) independently. This means we sample F twice, which can be quite\nexpensive, and it also fails to miss correlations between D1 and D2\n(which is common in actual data sets).\n\nThere are various other efficiency issues, because the joins go through\ncalc_joinrel_size_estimate and compute_semi_anti_join_factors, and each\nplace does the sampling again. The samples should be cached somewhere\nand reused, probably.\n\nI'm sure there's plenty open questions, some of which are mentioned in\nthe many XXX comments added to the patch.\n\nFWIW The patch does have some issues with expressions, so joins on\ncomplex expressions (e.g. ON ((a+b) = (c+d)) do not work properly. That\nshouldn't be a big deal for PoC, I think.\n\n\nregards\n\n\n[1] CS2: A new database synopsis for query estimation\nhttps://www.researchgate.net/publication/262350868_CS2_A_new_database_synopsis_for_query_estimation\n\n[2] Join Size Estimation Subject to Filter Conditions\nhttps://www.semanticscholar.org/paper/Join-Size-Estimation-Subject-to-Filter-Conditions-Vengerov-Menck/c8bd4caf0fc9c8a4fbffc7e05416901d4fd7a41b\n\n[3] Cardinality Estimation Done Right: Index-Based Join Sampling\nhttps://www.semanticscholar.org/paper/Cardinality-Estimation-Done-Right%3A-Index-Based-Join-Leis-Radke/15f211eaafc6ce421a511a413613e1d2683879d2\n\n[4] Improved Correlated Sampling for Join SizeEstimation\nhttps://www.comp.nus.edu.sg/~taining/estimation/report.pdf\n\n[5] A Survey on Advancing the DBMS Query Optimizer: Cardinality\nEstimation, Cost Model, and Plan Enumeration\nhttps://arxiv.org/abs/2101.01507\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL 
Company", "msg_date": "Sun, 27 Jun 2021 19:55:24 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": true, "msg_subject": "PoC: using sampling to estimate joins / complex conditions" }, { "msg_contents": "On Sun, Jun 27, 2021 at 07:55:24PM +0200, Tomas Vondra wrote:\n> estimating joins is one of the significant gaps related to extended\n> statistics, and I've been regularly asked about what we might do about\n> that. This is an early experimental patch that I think might help us\n> with improving this, possible even in PG15.\n\nThe patch does not apply, so a rebase would be in place. I have\nswitched that as waiting on author for now, moving it to the next CF.\n--\nMichael", "msg_date": "Fri, 3 Dec 2021 16:46:56 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: PoC: using sampling to estimate joins / complex conditions" }, { "msg_contents": "Hi,\n\nOn 2021-06-27 19:55:24 +0200, Tomas Vondra wrote:\n> estimating joins is one of the significant gaps related to extended\n> statistics, and I've been regularly asked about what we might do about\n> that. 
This is an early experimental patch that I think might help us\n> with improving this, possible even in PG15.\n\nThe tests in this patch fail:\nhttps://cirrus-ci.com/task/5304621299138560\nhttps://api.cirrus-ci.com/v1/artifact/task/5304621299138560/regress_diffs/src/test/regress/regression.diffs\n\nLooks like the regression test output hasn't been updated?\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Tue, 4 Jan 2022 15:58:12 -0800", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: PoC: using sampling to estimate joins / complex conditions" }, { "msg_contents": "On 1/5/22 00:58, Andres Freund wrote:\n> Hi,\n> \n> On 2021-06-27 19:55:24 +0200, Tomas Vondra wrote:\n>> estimating joins is one of the significant gaps related to extended\n>> statistics, and I've been regularly asked about what we might do about\n>> that. This is an early experimental patch that I think might help us\n>> with improving this, possible even in PG15.\n> \n> The tests in this patch fail:\n> https://cirrus-ci.com/task/5304621299138560\n> https://api.cirrus-ci.com/v1/artifact/task/5304621299138560/regress_diffs/src/test/regress/regression.diffs\n> \n> Looks like the regression test output hasn't been updated?\n> \n\nYeah, I haven't updated some of the test output because some of those\nchanges are a bit wrong (and I think that's fine for a PoC patch). I\nshould have mentioned that in the message, though. Sorry about that.\n\nThere are three types of failures:\n\n\n1) Changes to deparsed statistics definition in \\d command:\n\n- \"public.ctlt_all_a_b_stat\" ON a, b FROM ctlt_all\n+ \"public.ctlt_all_a_b_stat\" (ndistinct, dependencies, mcv) ON a, b\nFROM ctlt_all\n\nThis happens because there's a new kind \"sample\" but it's not set by\ndefault if creating new statistics, and the deparsing logic decides it\nmeans it has to list the kinds explicitly. 
I've fixed this in the\nattached patch, but it was mostly harmless and I'm not sure this is how\nsample should behave.\n\n\n2) Three GUC parameters allowing to enable/disable sampling for\ndifferent parts of a query (scan, join, correlated join sampling). I\nstill consider those GUCs temporary, for experiments, but I've added\nthem to the expected output.\n\n\n3) Changes in estimates for OR conditions - a couple estimates get less\naccurate, because OR clauses are handled as a single clause the first\ntime we pass them to statext_clauselist_selectivity(). So we combine the\nresults incorrectly. OR clauses may need some changes, because it's\ncausing issues in other patches too (e.g. in the \"Var op Var\" one).\n\n\nI haven't done anything about (3) yet - it's a valid issue and needs to\nbe fixed (either by changing how we handle OR clauses, or maybe handling\nsamples and MCVs at the same time). Or maybe some other way. In any\ncase, there is more important stuff that needs fixing first.\n\nThe main issue is planning overhead - for the example in the first\nmessage, with a simple query joining two tables, you'll see this:\n\n Planning Time: 603.442 ms\n Execution Time: 1071.735 ms\n\nThere's a couple reasons why it takes this long:\n\n\n1) We sample the same relation repeatedly - once as a \"scan\" and then\nwhile estimating joins. And for this query we do that in three different\ncontexts:\n\n- set_joinrel_size_estimates\n- populate_joinrel_with_paths (twice)\n\nI guess we'll get different number of samples for different queries, but\nit'll get worse for queries joining more tables etc. It seems fairly\nsimple to cache the samples - for example in StatisticExtInfo (or maybe\nsomewhere else, to keep just one sample per relation, not per RTE).\n\nUnfortunately, I'm not sure this works for \"independent\" samples, not\nfor correlated ones (which are just FK lookups for another sample, so\nthat depends on what's the other sample). 
Which is a bummer, because\ncorrelated samples are the more expensive ones :-(\n\n\n2) The correlated samples are currently built using a query, executed\nthrough SPI in a loop. So given a \"driving\" sample of 30k rows, we do\n30k lookups - that'll take time, even if we do that just once and cache\nthe results.\n\nI'm sure there there's room for some improvement, though - for example\nwe don't need to fetch all columns included in the statistics object,\nbut just stuff referenced by the clauses we're estimating. That could\nimprove chance of using IOS etc.\n\nI wonder if there's a more efficient way to do this, in a batched manner\nor something ... But even then it'll still be quite expensive.\n\n\nThe only other alternative I can think of is collecting samples during\nANALYZE, and storing them somewhere. That'll be difficult for correlated\nsamples, particularly for transitive cases (in snowflake schema). But I\nbelieve it's doable (at least for cases covered by FK constraints).\n\nBut I'm not sure where/how to store the samples. An obvious option would\nbe to serialize them into pg_statistic_ext_data, and I'll try doing that\n(it's a bit like a huge MCV without any cutoff). But maybe there's a\nbetter way that would not require constant serialization/deserialization\nof many tuples.\n\n\nAny ideas about these options?\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Fri, 21 Jan 2022 01:06:37 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Re: PoC: using sampling to estimate joins / complex conditions" }, { "msg_contents": "Hi,\n\nOn 2022-01-21 01:06:37 +0100, Tomas Vondra wrote:\n> Yeah, I haven't updated some of the test output because some of those\n> changes are a bit wrong (and I think that's fine for a PoC patch). I\n> should have mentioned that in the message, though. 
Sorry about that.\n\nGiven that the patch hasn't been updated since January and that it's a PoC in\nthe final CF, it seems like it should at least be moved to the next CF? Or\nperhaps returned?\n\nI've just marked it as waiting-on-author for now - iirc that leads to fewer\nreruns by cfbot once it's failing...\n\n\n> 2) The correlated samples are currently built using a query, executed\n> through SPI in a loop. So given a \"driving\" sample of 30k rows, we do\n> 30k lookups - that'll take time, even if we do that just once and cache\n> the results.\n\nUgh, yea, that's going to increase overhead by at least a few factors.\n\n\n> I'm sure there there's room for some improvement, though - for example\n> we don't need to fetch all columns included in the statistics object,\n> but just stuff referenced by the clauses we're estimating. That could\n> improve chance of using IOS etc.\n\nYea. Even just avoid avoiding SPI / planner + executor seems likely to be a\nbig win.\n\n\nIt seems one more of the cases where we really need logic to recognize \"cheap\"\nvs \"expensive\" plans, so that we only do sampling when useful. I don't think\nthat's solved just by having a declarative syntax.\n\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Mon, 21 Mar 2022 16:35:41 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: PoC: using sampling to estimate joins / complex conditions" }, { "msg_contents": "\n\nOn 3/22/22 00:35, Andres Freund wrote:\n> Hi,\n> \n> On 2022-01-21 01:06:37 +0100, Tomas Vondra wrote:\n>> Yeah, I haven't updated some of the test output because some of those\n>> changes are a bit wrong (and I think that's fine for a PoC patch). I\n>> should have mentioned that in the message, though. Sorry about that.\n> \n> Given that the patch hasn't been updated since January and that it's a PoC in\n> the final CF, it seems like it should at least be moved to the next CF? 
Or\n> perhaps returned?\n> \n> I've just marked it as waiting-on-author for now - iirc that leads to fewer\n> reruns by cfbot once it's failing...\n> \n\nEither option works for me.\n\n> \n>> 2) The correlated samples are currently built using a query, executed\n>> through SPI in a loop. So given a \"driving\" sample of 30k rows, we do\n>> 30k lookups - that'll take time, even if we do that just once and cache\n>> the results.\n> \n> Ugh, yea, that's going to increase overhead by at least a few factors.\n> \n> \n>> I'm sure there there's room for some improvement, though - for example\n>> we don't need to fetch all columns included in the statistics object,\n>> but just stuff referenced by the clauses we're estimating. That could\n>> improve chance of using IOS etc.\n> \n> Yea. Even just avoid avoiding SPI / planner + executor seems likely to be a\n> big win.\n> \n> \n> It seems one more of the cases where we really need logic to recognize \"cheap\"\n> vs \"expensive\" plans, so that we only do sampling when useful. I don't think\n> that's solved just by having a declarative syntax.\n> \n\nRight.\n\nI was thinking about walking the first table, collecting all the values,\nand then doing a single IN () query for the second table - a bit like a\ncustom join (which seems a bit terrifying, TBH).\n\nBut even if we manage to make this much cheaper, there will still be\nsimple queries where it's going to be prohibitively expensive.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Tue, 22 Mar 2022 01:17:26 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Re: PoC: using sampling to estimate joins / complex conditions" } ]
[ { "msg_contents": "Hello,\n\nWhile trying to use sqlsmith with postgres compiled from the master\nbranch, I've found that the PQerrorMessage() function now returns\nnon-informational but not empty error message after the successful\nPQconnectdb() call.\n    conn = PQconnectdb(conninfo.c_str());\n    char *errmsg = PQerrorMessage(conn);\nreturns\n'connection to server on socket \"/tmp/ody8OuOaqV/.s.PGSQL.59860\" failed: '\n\nThe affected sqlsmith code:\nhttps://github.com/anse1/sqlsmith/blob/master/postgres.cc#L305\n\nBest regards,\nAlexander\n\n\n\n\n\n\n Hello,\n\n While trying to use sqlsmith with postgres compiled from the master\n branch, I've found that the PQerrorMessage()\n function now returns non-informational but not empty error message\n after the successful PQconnectdb() call.\n    conn = PQconnectdb(conninfo.c_str());\n     char *errmsg = PQerrorMessage(conn);\n returns\n 'connection to server on socket \"/tmp/ody8OuOaqV/.s.PGSQL.59860\"\n failed: '\n\n The affected sqlsmith code:\nhttps://github.com/anse1/sqlsmith/blob/master/postgres.cc#L305\n\n Best regards,\n Alexander", "msg_date": "Sun, 27 Jun 2021 23:00:00 +0300", "msg_from": "Alexander Lakhin <exclusion@gmail.com>", "msg_from_op": true, "msg_subject": "PQconnectdb/PQerrorMessage changed behavior on master" }, { "msg_contents": "Alexander Lakhin <exclusion@gmail.com> writes:\n> While trying to use sqlsmith with postgres compiled from the master\n> branch, I've found that the PQerrorMessage() function now returns\n> non-informational but not empty error message after the successful\n> PQconnectdb() call.\n\nYeah, see thread here:\n\nhttps://www.postgresql.org/message-id/20210506162651.GJ27406%40telsasoft.com\n\nsqlsmith is definitely Doing It Wrong there, but there's a\nreasonable question whether such coding is common.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 27 Jun 2021 16:07:03 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: 
PQconnectdb/PQerrorMessage changed behavior on master" }, { "msg_contents": "27.06.2021 23:07, Tom Lane wrote:\n>> While trying to use sqlsmith with postgres compiled from the master\n>> branch, I've found that the PQerrorMessage() function now returns\n>> non-informational but not empty error message after the successful\n>> PQconnectdb() call.\n> Yeah, see thread here:\n>\n> https://www.postgresql.org/message-id/20210506162651.GJ27406%40telsasoft.com\n>\n> sqlsmith is definitely Doing It Wrong there, but there's a\n> reasonable question whether such coding is common.\nThanks for info! I agree that sqlsmith's check is incorrect, nonetheless\nI was embarrassed by the incomplete error message.\n\nBest regards,\nAlexander\n\n\n", "msg_date": "Mon, 28 Jun 2021 07:00:00 +0300", "msg_from": "Alexander Lakhin <exclusion@gmail.com>", "msg_from_op": true, "msg_subject": "Re: PQconnectdb/PQerrorMessage changed behavior on master" } ]
[ { "msg_contents": "The wraparound failsafe mechanism added by commit 1e55e7d1 had minimal\ndocumentation -- just a basic description of how the GUCs work. I\nthink that it certainly merits some discussion under \"25.1. Routine\nVacuuming\" -- more specifically under \"25.1.5. Preventing Transaction\nID Wraparound Failures\". One reason why this didn't happen in the\noriginal commit was that I just didn't know where to start with it.\nThe docs in question have said this since 2006's commit 48188e16 first\nadded autovacuum_freeze_max_age:\n\n\"The sole disadvantage of increasing autovacuum_freeze_max_age (and\nvacuum_freeze_table_age along with it) is that the pg_xact and\npg_commit_ts subdirectories of the database cluster will take more\nspace...\"\n\nThis sentence seems completely unreasonable to me. It seems to just\nignore the huge disadvantage of increasing autovacuum_freeze_max_age:\nthe *risk* that the system will stop being able to allocate new XIDs\nbecause GetNewTransactionId() errors out with \"database is not\naccepting commands to avoid wraparound data loss...\". Sure, it's\npossible to take a lot of risk here without it ever blowing up in your\nface. And if it doesn't blow up then the downside really is zero. This\nis hardly a sensible way to talk about this important risk. Or any\nrisk at all.\n\nAt first I thought that the sentence was not just misguided -- it\nseemed downright bizarre. I thought that it was directly at odds with\nthe title \"Preventing Transaction ID Wraparound Failures\". I thought\nthat the whole point of this section was how not to have a wraparound\nfailure (as I understand the term), and yet we seem to deliberately\nignore the single most important practical aspect of making sure that\nthat doesn't happen. But I now suspect that the basic definitions have\nbeen mixed up in a subtle but important way.\n\nWhat the documentation calls a \"wraparound failure\" seems to be rather\ndifferent to what I thought that that meant. 
As I said, I thought that\nthat meant the condition of being unable to get new transaction IDs\n(at least until the DBA runs VACUUM in single user mode). But the\ndocumentation in question seems to actually define it as \"the\ncondition of an old MVCC snapshot failing to see a version from the\ndistant past, because somehow an XID wraparound suddenly makes it look\nas if it's in the distant future rather than in the past\". It's\nactually talking about a subtly different thing, so the \"sole\ndisadvantage\" sentence is not actually bizarre. It does still seem\nimpractical and confusing, though.\n\nI strongly suspect that my interpretation of what \"wraparound failure\"\nmeans is actually the common one. Of course the system is never under\nany circumstances allowed to give totally wrong answers to queries, no\nmatter what -- users should be able to take that much for granted.\nWhat users care about here is sensibly managing XIDs as a resource --\npreventing \"XID exhaustion\" while being conservative, but not\nridiculously conservative. Could the documentation be completely\nmisleading users here?\n\nI have two questions:\n\n1. Do I have this right? Is there really confusion about what a\n\"wraparound failure\" means, or is the confusion mine alone?\n\n2. How do I go about integrating discussion of the failsafe here?\nAnybody have thoughts on that?\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Sun, 27 Jun 2021 13:36:19 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": true, "msg_subject": "What is \"wraparound failure\", really?" }, { "msg_contents": "\nOn 6/27/21 4:36 PM, Peter Geoghegan wrote:\n> The wraparound failsafe mechanism added by commit 1e55e7d1 had minimal\n> documentation -- just a basic description of how the GUCs work. I\n> think that it certainly merits some discussion under \"25.1. Routine\n> Vacuuming\" -- more specifically under \"25.1.5. Preventing Transaction\n> ID Wraparound Failures\". 
One reason why this didn't happen in the\n> original commit was that I just didn't know where to start with it.\n> The docs in question have said this since 2006's commit 48188e16 first\n> added autovacuum_freeze_max_age:\n>\n> \"The sole disadvantage of increasing autovacuum_freeze_max_age (and\n> vacuum_freeze_table_age along with it) is that the pg_xact and\n> pg_commit_ts subdirectories of the database cluster will take more\n> space...\"\n>\n> This sentence seems completely unreasonable to me. It seems to just\n> ignore the huge disadvantage of increasing autovacuum_freeze_max_age:\n> the *risk* that the system will stop being able to allocate new XIDs\n> because GetNewTransactionId() errors out with \"database is not\n> accepting commands to avoid wraparound data loss...\". Sure, it's\n> possible to take a lot of risk here without it ever blowing up in your\n> face. And if it doesn't blow up then the downside really is zero. This\n> is hardly a sensible way to talk about this important risk. Or any\n> risk at all.\n>\n> At first I thought that the sentence was not just misguided -- it\n> seemed downright bizarre. I thought that it was directly at odds with\n> the title \"Preventing Transaction ID Wraparound Failures\". I thought\n> that the whole point of this section was how not to have a wraparound\n> failure (as I understand the term), and yet we seem to deliberately\n> ignore the single most important practical aspect of making sure that\n> that doesn't happen. But I now suspect that the basic definitions have\n> been mixed up in a subtle but important way.\n>\n> What the documentation calls a \"wraparound failure\" seems to be rather\n> different to what I thought that that meant. As I said, I thought that\n> that meant the condition of being unable to get new transaction IDs\n> (at least until the DBA runs VACUUM in single user mode). 
But the\n> documentation in question seems to actually define it as \"the\n> condition of an old MVCC snapshot failing to see a version from the\n> distant past, because somehow an XID wraparound suddenly makes it look\n> as if it's in the distant future rather than in the past\". It's\n> actually talking about a subtly different thing, so the \"sole\n> disadvantage\" sentence is not actually bizarre. It does still seem\n> impractical and confusing, though.\n>\n> I strongly suspect that my interpretation of what \"wraparound failure\"\n> means is actually the common one. Of course the system is never under\n> any circumstances allowed to give totally wrong answers to queries, no\n> matter what -- users should be able to take that much for granted.\n> What users care about here is sensibly managing XIDs as a resource --\n> preventing \"XID exhaustion\" while being conservative, but not\n> ridiculously conservative. Could the documentation be completely\n> misleading users here?\n>\n> I have two questions:\n>\n> 1. Do I have this right? Is there really confusion about what a\n> \"wraparound failure\" means, or is the confusion mine alone?\n>\n> 2. How do I go about integrating discussion of the failsafe here?\n> Anybody have thoughts on that?\n>\n\n\nAIUI, actual wraparound (i.e. an xid crossing the event horizon so it\nappears to be in the future) is no longer possible. But it once was a\nvery real danger. Maybe the docs haven't quite caught up.\n\n\nIn practical terms, there is an awful lot of head room between the\ndefault for autovacuum_freeze_max_age and any danger of major\nanti-wraparound measures. Say you increase it to 1bn from the default\n200m. 
That still leaves you ~1bn transactions of headroom.\n\n\ncheers\n\n\nandrew\n\n\n\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Sun, 27 Jun 2021 19:23:25 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: What is \"wraparound failure\", really?" }, { "msg_contents": "On Mon, Jun 28, 2021 at 8:36 AM Peter Geoghegan <pg@bowt.ie> wrote:\n> \"The sole disadvantage of increasing autovacuum_freeze_max_age (and\n> vacuum_freeze_table_age along with it) is that the pg_xact and\n> pg_commit_ts subdirectories of the database cluster will take more\n> space...\"\n\nJust by the way, if we're updating this sentence, it continues\n\"because it must store...\" but it should surely be \"because they must\nstore...\".\n\n\n", "msg_date": "Mon, 28 Jun 2021 12:23:31 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: What is \"wraparound failure\", really?" }, { "msg_contents": "On Sun, Jun 27, 2021 at 4:23 PM Andrew Dunstan <andrew@dunslane.net> wrote:\n> AIUI, actual wraparound (i.e. an xid crossing the event horizon so it\n> appears to be in the future) is no longer possible. But it once was a\n> very real danger. Maybe the docs haven't quite caught up.\n\nThis was added a few years after freezing was first invented, which\nwas arguably the last time that the design fundamentally changed. I\nthink we all agree that it's fundamentally not okay to give wrong\nanswers to queries -- it doesn't even need to be stated in the docs\nIMV. So why does this section of the docs spend so much time talking\nabout something that fundamentally cannot happen? Why not have it\nfocus on the bad outcome that there is a real risk of instead? 
Namely\nthe risk of the system refusing to allow new XIDs (as a means of\navoiding the wrong answers when all else fails).\n\nIt's hard to talk about the new failsafe in this section of the docs\nnow, since it's unclear whether it exists to advise the user on ways\nof avoiding the \"can't allocate XIDs\" failure mode. It could be\ninterpreted that way, or it could just be explaining and/or justifying\nthe existence of the failure mode. That seems like a real problem.\n\n> In practical terms, there is an awful lot of head room between the\n> default for autovacuum_freeze_max_age and any danger of major\n> anti-wraparound measures. Say you increase it to 1bn from the default\n> 200m. That still leaves you ~1bn transactions of headroom.\n\nI agree that in practice that's often fine. But my point is that there\nis another very good reason to not increase autovacuum_freeze_max_age,\ncontrary to what the docs say (actually there is a far better reason\nthan truncating clog). Namely, increasing it will generally increase\nthe risk of VACUUM not finishing in time. If that happens the user\ngets the \"can't allocate XIDs\" failure mode (which is what I have\ncalled wraparound failure up until now), which is one of the worst\nthings that can happen. This makes the inability to truncate clog look\nlike a totally trivial issue in comparison.\n\nReasonable people can disagree about when and how increasing\nautovacuum_freeze_max_age becomes truly reckless. However, I don't\nthink that anybody would be willing to argue that setting it to the\nmaximum of 2 billion could ever make sense in production, to go with\nthe obvious extreme case. 
The benefits that you get from such a high\nsetting over and above what you get with a moderately high setting\n(perhaps 1 - 1.5 billion) are really quite small, while the risk\nshoots up fast past a certain point.\n\nRegardless of what the nuances of increasing autovacuum_freeze_max_age\nare, stating that the sole disadvantage is that you cannot truncate\nclog and other SLRUs is clearly wrong.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Sun, 27 Jun 2021 23:39:48 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": true, "msg_subject": "Re: What is \"wraparound failure\", really?" }, { "msg_contents": "On Mon, Jun 28, 2021 at 5:36 AM Peter Geoghegan <pg@bowt.ie> wrote:\n>\n> The wraparound failsafe mechanism added by commit 1e55e7d1 had minimal\n> documentation -- just a basic description of how the GUCs work. I\n> think that it certainly merits some discussion under \"25.1. Routine\n> Vacuuming\" -- more specifically under \"25.1.5. Preventing Transaction\n> ID Wraparound Failures\". One reason why this didn't happen in the\n> original commit was that I just didn't know where to start with it.\n> The docs in question have said this since 2006's commit 48188e16 first\n> added autovacuum_freeze_max_age:\n>\n> \"The sole disadvantage of increasing autovacuum_freeze_max_age (and\n> vacuum_freeze_table_age along with it) is that the pg_xact and\n> pg_commit_ts subdirectories of the database cluster will take more\n> space...\"\n>\n> This sentence seems completely unreasonable to me. It seems to just\n> ignore the huge disadvantage of increasing autovacuum_freeze_max_age:\n> the *risk* that the system will stop being able to allocate new XIDs\n> because GetNewTransactionId() errors out with \"database is not\n> accepting commands to avoid wraparound data loss...\". Sure, it's\n> possible to take a lot of risk here without it ever blowing up in your\n> face. And if it doesn't blow up then the downside really is zero. 
This\n> is hardly a sensible way to talk about this important risk. Or any\n> risk at all.\n>\n> At first I thought that the sentence was not just misguided -- it\n> seemed downright bizarre. I thought that it was directly at odds with\n> the title \"Preventing Transaction ID Wraparound Failures\". I thought\n> that the whole point of this section was how not to have a wraparound\n> failure (as I understand the term), and yet we seem to deliberately\n> ignore the single most important practical aspect of making sure that\n> that doesn't happen. But I now suspect that the basic definitions have\n> been mixed up in a subtle but important way.\n>\n> What the documentation calls a \"wraparound failure\" seems to be rather\n> different to what I thought that that meant. As I said, I thought that\n> that meant the condition of being unable to get new transaction IDs\n> (at least until the DBA runs VACUUM in single user mode). But the\n> documentation in question seems to actually define it as \"the\n> condition of an old MVCC snapshot failing to see a version from the\n> distant past, because somehow an XID wraparound suddenly makes it look\n> as if it's in the distant future rather than in the past\". It's\n> actually talking about a subtly different thing, so the \"sole\n> disadvantage\" sentence is not actually bizarre. It does still seem\n> impractical and confusing, though.\n>\n> I strongly suspect that my interpretation of what \"wraparound failure\"\n> means is actually the common one. Of course the system is never under\n> any circumstances allowed to give totally wrong answers to queries, no\n> matter what -- users should be able to take that much for granted.\n> What users care about here is sensibly managing XIDs as a resource --\n> preventing \"XID exhaustion\" while being conservative, but not\n> ridiculously conservative. Could the documentation be completely\n> misleading users here?\n>\n> I have two questions:\n>\n> 1. Do I have this right? 
Is there really confusion about what a\n> \"wraparound failure\" means, or is the confusion mine alone?\n>\n> 2. How do I go about integrating discussion of the failsafe here?\n> Anybody have thoughts on that?\n\nLooking through the doc again, it seems to me that there is no\nexplicit explanation for the worst situation. It might be true in\nprinciple that “XID wraparound failure” means catastrophic data loss\ndue to XID wraparound. But it doesn’t actually happen since we\ndisallow to allocate new XID three million XID before the wraparound.\nIn other words, entering the read-only mode is the worst situation in\nPostgreSQL in terms of XID consumption. There is some description of\nrefusing to start any new transactions at the end of section 25.1.5\nbut it seems neither enough nor accurate. It describes the read-only\nmode from only the aspect of a safeguard but not from the aspect of\nthe situation where we want to avoid. Explicitly describing also the\nlatter aspect could give weight to both the description of failsafe\nmode, especially why we skip some operations to speed up increasing\nrelfrozenxid in that mode, and another disadvantage of increasing\nautovacuum_freeze_max_age.\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Mon, 28 Jun 2021 17:44:44 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": false, "msg_subject": "Re: What is \"wraparound failure\", really?" }, { "msg_contents": "\nOn 6/28/21 2:39 AM, Peter Geoghegan wrote:\n> On Sun, Jun 27, 2021 at 4:23 PM Andrew Dunstan <andrew@dunslane.net> wrote:\n>\n\n>> In practical terms, there is an awful lot of head room between the\n>> default for autovacuum_freeze_max_age and any danger of major\n>> anti-wraparound measures. Say you increase it to 1bn from the default\n>> 200m. That still leaves you ~1bn transactions of headroom.\n> I agree that in practice that's often fine. 
But my point is that there\n> is another very good reason to not increase autovacuum_freeze_max_age,\n> contrary to what the docs say (actually there is a far better reason\n> than truncating clog). Namely, increasing it will generally increase\n> the risk of VACUUM not finishing in time. If that happens the user\n> gets the \"can't allocate XIDs\" failure mode (which is what I have\n> called wraparound failure up until now), which is one of the worst\n> things that can happen. This makes the inability to truncate clog look\n> like a totally trivial issue in comparison.\n>\n> Reasonable people can disagree about when and how increasing\n> autovacuum_freeze_max_age becomes truly reckless. However, I don't\n> think that anybody would be willing to argue that setting it to the\n> maximum of 2 billion could ever make sense in production, to go with\n> the obvious extreme case. The benefits that you get from such a high\n> setting over and above what you get with a moderately high setting\n> (perhaps 1 - 1.5 billion) are really quite small, while the risk\n> shoots up fast past a certain point.\n>\n> Regardless of what the nuances of increasing autovacuum_freeze_max_age\n> are, stating that the sole disadvantage is that you cannot truncate\n> clog and other SLRUs is clearly wrong.\n>\n\n\nSure, I'm not suggesting the docs can't have some improvement.\n\nThis is one of those things that in my experience most people don't get.\nIndeed, I didn't really get it either until I had to explain it with\nsome clarity to a very confused customer. And I find it's best explained\nby showing what bad results are being avoided by it. Freezing is one of\nthose almost useless things you just have to do. 
It doesn't help that\nit's tangled up with VACUUM, so when you explain that it's not about\nreclaiming dead space heads start to explode.\n\nBut if you're really worried about people setting\nautovacuum_freeze_max_age too high, then maybe we should be talking\nabout capping it at a lower level rather than adjusting the docs that\nmost users don't read.\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 08:51:50 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: What is \"wraparound failure\", really?" }, { "msg_contents": "On Mon, Jun 28, 2021 at 08:51:50AM -0400, Andrew Dunstan wrote:\n> On 6/28/21 2:39 AM, Peter Geoghegan wrote:\n> > I agree that in practice that's often fine. But my point is that there\n> > is another very good reason to not increase autovacuum_freeze_max_age,\n> > contrary to what the docs say (actually there is a far better reason\n> > than truncating clog). Namely, increasing it will generally increase\n> > the risk of VACUUM not finishing in time.\n\nYep, that doc section's priorities are out of date.\n\n> But if you're really worried about people setting\n> autovacuum_freeze_max_age too high, then maybe we should be talking\n> about capping it at a lower level rather than adjusting the docs that\n> most users don't read.\n\nIf a GUC minimum or maximum feels like a mainstream choice, it's probably too\nstrict. Hence, I think the current maximum is fine. At 93% of the XID space,\nit's not risk-averse, but it's not absurd.\n\n\n", "msg_date": "Tue, 29 Jun 2021 20:07:27 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: What is \"wraparound failure\", really?" 
}, { "msg_contents": "On Mon, Jun 28, 2021 at 8:52 AM Andrew Dunstan <andrew@dunslane.net> wrote:\n> But if you're really worried about people setting\n> autovacuum_freeze_max_age too high, then maybe we should be talking\n> about capping it at a lower level rather than adjusting the docs that\n> most users don't read.\n\nThe problem is that the setting is measuring something that is a\npretty poor proxy for the thing we actually care about. It's measuring\nthe XID age at which we're going to start forcing vacuums on tables\nthat don't otherwise need to be vacuumed, but the thing we care about\nis the XID age at which those vacuums are going to *finish*. Now maybe\nyou think that's a minor difference, and if your tables are small, it\nis, but if they're really big, it's not. If you have only tables that\nare say 1GB in size and your system is otherwise well-configured, you\ncould probably crank autovacuum_freeze_max_age up all the way to the\nmax without a problem. But if you have 1TB tables, you are going to\nneed a lot more headroom. The exact amount of headroom you need\ndepends especially on the size of your largest tables, but also on how\nwell-distributed the relfrozenxid values are, and on the total sizes\nof all your tables, on your I/O subsystem, on your XID consumption\nrate, on your vacuum delay settings, and on whether you want to make\nany allowance for the rare but possible scenario where vacuum dies to\nan ERROR. This means that in practice nobody knows whether a\nparticular setting of autovacuum_freeze_max_age on a particular system\nis safe or not, except in the absolutely most obvious cases. Capping\nit at a lower level would prevent some people from doing things that\nare perfectly safe and still not prevent other people from doing\nthings that are horribly dangerous.\n\nI think what we really need here is some kind of deadline-based\nscheduler. As Peter says, the problem is that we might run out of\nXIDs. 
The system should be constantly thinking about that and taking\nappropriate emergency actions to make sure it doesn't happen. Right\nnow it's really pretty chill about the possibility of looming\ndisaster. Imagine that you hire a babysitter and tell them to get the\nkids out of the house if there's a fire. While you're out, a volcano\nerupts down the block. A giant cloud of ash forms and there's lava\neverywhere, even touching the house, which begins to smolder, but the\nbabysitter just sits there and watches TV. As soon as the first flames\nappear, the babysitter stops watching TV, gets the kids, and tries to\nleave the premises. That's our autovacuum scheduler! It has no\ninclination or ability to see the future; it makes decisions entirely\nbased on the present state of things. In a lot of cases that's OK, but\nsometimes it leads to a completely ridiculous outcome.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 30 Jun 2021 09:46:41 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: What is \"wraparound failure\", really?" }, { "msg_contents": "On Wed, Jun 30, 2021 at 6:46 AM Robert Haas <robertmhaas@gmail.com> wrote:\n> The problem is that the setting is measuring something that is a\n> pretty poor proxy for the thing we actually care about. It's measuring\n> the XID age at which we're going to start forcing vacuums on tables\n> that don't otherwise need to be vacuumed, but the thing we care about\n> is the XID age at which those vacuums are going to *finish*. Now maybe\n> you think that's a minor difference, and if your tables are small, it\n> is, but if they're really big, it's not. If you have only tables that\n> are say 1GB in size and your system is otherwise well-configured, you\n> could probably crank autovacuum_freeze_max_age up all the way to the\n> max without a problem. 
But if you have 1TB tables, you are going to\n> need a lot more headroom.\n\nI 100% agree with all of that. However, I can't help but notice that\nyour argument seems to work best as an argument against how freezing\nworks in general. The scheduling is way too complex because we're\nfundamentally trying to model something that is way too complex and\nnonlinear by its very nature. It's true that we can do a better job by\ncontinually updating our understanding of the state of the system\ndynamically, during each VACUUM. But maybe we should get rid of\nfreezing instead. Is it really so hard to do that, in the grand scheme\nof things?\n\nWe have tuple freezing because we need it to solve a problem with the\n\"physical database\" (not the \"logical database\"). Namely the problem\nof having 32-bit XIDs in tuple headers when 64-bit XIDs are\ntheoretically what we need. I'm not actually in favor of 64-bit XIDs\nin tuple headers (or anything like it), but I am in favor of at least\nsolving the problem with a true \"physical database\" level solution.\nThe definition of freezing unnecessarily couples how we handle the XID\nissue with GC by VACUUM, which makes everything much more fragile. A\nfrozen tuple must necessarily be visible to any possible MVCC\nsnapshot. That's really fragile, in many different ways. It's also\nunnecessary.\n\nWhy should XID wraparound be a problem for the entire system? Why not\njust make it a problem for any very old MVCC snapshots that are\n*actually* about to be affected? Some kind of \"snapshot too old\"\napproach seems quite possible. I think that we can do a lot better\nthan freezing within the confines of the current heapam design (or the\ndesign prior to the introduction of freezing ~20 years ago). Once\naborted XIDs are removed eagerly, a strict \"logical vs physical\"\nseparation of concerns can be imposed.\n\nI'm sorry to go on about this again and again, but it really does seem\nrelated to what you're saying. 
The current freezing design is hard to\nmodel because it's inherently fragile.\n\n> I think what we really need here is some kind of deadline-based\n> scheduler. As Peter says, the problem is that we might run out of\n> XIDs. The system should be constantly thinking about that and taking\n> appropriate emergency actions to make sure it doesn't happen. Right\n> now it's really pretty chill about the possibility of looming\n> disaster. Imagine that you hire a babysitter and tell them to get the\n> kids out of the house if there's a fire. While you're out, a volcano\n> erupts down the block. A giant cloud of ash forms and there's lava\n> everywhere, even touching the house, which begins to smolder, but the\n> babysitter just sits there and watches TV. As soon as the first flames\n> appear, the babysitter stops watching TV, gets the kids, and tries to\n> leave the premises. That's our autovacuum scheduler! It has no\n> inclination or ability to see the future; it makes decisions entirely\n> based on the present state of things. In a lot of cases that's OK, but\n> sometimes it leads to a completely ridiculous outcome.\n\nYeah, it's still pretty absurd, even with the failsafe.\n\nTo extend your analogy, in the real world the babysitter can afford to\nmake very conservative assumptions about whether or not the house is\nabout to catch fire. In practice the chances of that happening on any\ngiven day are certainly very low -- it'll probably never come close to\nhappening even once. And there is an inherent asymmetry, since of\ncourse the cost of a false positive is that the friends reunion\nepisode is unnecessarily cut short, which is totally inconsequential\ncompared to the cost of a false negative. 
If there wasn't such a big\nasymmetry then what we'd probably do is not even think about what the\nbabysitter does -- we just wouldn't care at all.\n\nAnyway, I'll try to come up with a way of rewording this section of\nthe docs that mostly preserves its existing structure, but makes it\npossible to talk about the failsafe. The current structure of this\nsection of the docs is needlessly ambiguous, but I think that that can\nbe fixed without changing too much. FWIW I have heard things that\nsuggest that some users believe that modern PostgreSQL can actually\nallow \"the past to look like the future\" in some cases -- probably\nbecause of the wording here. This area of the system certainly is\nscary, but it's not quite that scary.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Wed, 30 Jun 2021 10:43:24 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": true, "msg_subject": "Re: What is \"wraparound failure\", really?" } ]
[ { "msg_contents": "This is crashing repeatedly during insert/update immediately after upgrading an\ninstance to v14, from v13.3. In case it matters, the cluster was originally\ninitdb at 13.2.\n\nTRAP: FailedAssertion(\"_bt_posting_valid(nposting)\", File: \"nbtdedup.c\", Line: 1062, PID: 28580)\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(ExceptionalCondition+0x8d)[0x967d1d]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(_bt_swap_posting+0x2cd)[0x507cdd]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT[0x509a14]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(_bt_doinsert+0xcb7)[0x50d0b7]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(btinsert+0x52)[0x5130f2]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(ExecInsertIndexTuples+0x231)[0x687b81]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT[0x6b8718]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT[0x6b9297]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(standard_ExecutorRun+0x142)[0x688b32]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT[0x82da8a]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT[0x82e673]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT[0x82e936]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(PortalRun+0x2eb)[0x82ec8b]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(PostgresMain+0x1f97)[0x82c777]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT[0x48f71a]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(PostmasterMain+0x1138)[0x794c98]\npostgres: telsasoft ts 127.0.0.1(52250) INSERT(main+0x6f2)[0x491292]\n\n< 2021-06-27 23:46:43.257 CAT >DETAIL: Failed process was running: INSERT INTO alarms(...\n\n#3 0x0000000000507cdd in _bt_swap_posting (newitem=newitem@entry=0x2011c00, oposting=oposting@entry=0x7f6de58e2a78, postingoff=postingoff@entry=62) at nbtdedup.c:1062\n nhtids = <optimized out>\n replacepos = 0x2011dac \"\"\n nposting = 0x2011c28\n __func__ = \"_bt_swap_posting\"\n#4 0x0000000000509a14 in _bt_insertonpg (rel=rel@entry=0x7f6dfd3cd628, itup_key=itup_key@entry=0x2011b40, buf=15, 
cbuf=cbuf@entry=0, stack=stack@entry=0x2011bd8, itup=0x2011c00, itup@entry=0x200d608, itemsz=16, \n newitemoff=2, postingoff=62, split_only_page=split_only_page@entry=false) at nbtinsert.c:1174\n itemid = 0x7f6de58e0e1c\n page = 0x7f6de58e0e00 \"\\200\"\n opaque = 0x7f6de58e2df0\n isleaf = true\n isroot = false\n isrightmost = false\n isonly = false\n oposting = 0x7f6de58e2a78\n origitup = <optimized out>\n nposting = 0x0\n __func__ = \"_bt_insertonpg\"\n#5 0x000000000050d0b7 in _bt_doinsert (rel=rel@entry=0x7f6dfd3cd628, itup=itup@entry=0x200d608, checkUnique=checkUnique@entry=UNIQUE_CHECK_NO, indexUnchanged=indexUnchanged@entry=false, \n heapRel=heapRel@entry=0x7f6dfd48ba80) at nbtinsert.c:257\n newitemoff = 0\n is_unique = false\n insertstate = {itup = 0x200d608, itemsz = 16, itup_key = 0x2011b40, buf = 15, bounds_valid = true, low = 2, stricthigh = 3, postingoff = 62}\n itup_key = <optimized out>\n checkingunique = <optimized out>\n#6 0x00000000005130f2 in btinsert (rel=0x7f6dfd3cd628, values=<optimized out>, isnull=<optimized out>, ht_ctid=0x212e250, heapRel=0x7f6dfd48ba80, checkUnique=UNIQUE_CHECK_NO, indexUnchanged=false, \n indexInfo=0x200d2e8) at nbtree.c:199\n result = <optimized out>\n itup = 0x200d608\n#7 0x0000000000687b81 in ExecInsertIndexTuples (resultRelInfo=resultRelInfo@entry=0x200cd90, slot=slot@entry=0x212e220, estate=estate@entry=0x212c6b0, update=update@entry=false, noDupErr=noDupErr@entry=false, \n specConflict=specConflict@entry=0x0, arbiterIndexes=arbiterIndexes@entry=0x0) at execIndexing.c:415\n\n\n(gdb) p *newitem\n$2 = {t_tid = {ip_blkid = {bi_hi = 0, bi_lo = 22}, ip_posid = 4}, t_info = 32784}\n(gdb) p *oposting\n$3 = {t_tid = {ip_blkid = {bi_hi = 0, bi_lo = 16}, ip_posid = 8333}, t_info = 41824}\n\nI will save a copy of the data dir and see if reindexing helps.\nLet me know if there's anything else I can provide.\n\n-- \nJustin\n\n\n", "msg_date": "Sun, 27 Jun 2021 16:57:03 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", 
"msg_from_op": true, "msg_subject": "pg14b2: FailedAssertion(\"_bt_posting_valid(nposting)\", File:\n \"nbtdedup.c\", ..." }, { "msg_contents": "Can you please amcheck all of the indexes?\n\nPeter Geoghegan\n(Sent from my phone)\n\nCan you please amcheck all of the indexes? Peter Geoghegan(Sent from my phone)", "msg_date": "Sun, 27 Jun 2021 15:08:13 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: pg14b2: FailedAssertion(\"_bt_posting_valid(nposting)\", File:\n \"nbtdedup.c\", ..." }, { "msg_contents": "Would also be good to get a raw binary copy of the page image in question.\nHopefully the data isn't confidential. Same gdb procedure as before.\n\nThanks\n\nPeter Geoghegan\n(Sent from my phone)\n\nWould also be good to get a raw binary copy of the page image in question. Hopefully the data isn't confidential. Same gdb procedure as before. Thanks Peter Geoghegan(Sent from my phone)", "msg_date": "Sun, 27 Jun 2021 15:16:57 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: pg14b2: FailedAssertion(\"_bt_posting_valid(nposting)\", File:\n \"nbtdedup.c\", ..." }, { "msg_contents": "Did you also change the kernel on upgrade? I recall that that was a factor\non the other recent bug thread.\n\nPeter Geoghegan\n(Sent from my phone)\n\nDid you also change the kernel on upgrade? I recall that that was a factor on the other recent bug thread. Peter Geoghegan(Sent from my phone)", "msg_date": "Sun, 27 Jun 2021 15:18:19 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: pg14b2: FailedAssertion(\"_bt_posting_valid(nposting)\", File:\n \"nbtdedup.c\", ..." 
}, { "msg_contents": "On Sun, Jun 27, 2021 at 03:08:13PM -0700, Peter Geoghegan wrote:\n> Can you please amcheck all of the indexes?\n\nts=# SELECT bt_index_check('child.alarms_null_alarm_clear_time_idx'::regclass);\nERROR: item order invariant violated for index \"alarms_null_alarm_clear_time_idx\"\nDETAIL: Lower index tid=(1,77) (points to heap tid=(29,9)) higher index tid=(1,78) (points to heap tid=(29,9)) page lsn=80/4B9C69D0.\n\nts=# SELECT itemoffset, ctid ,itemlen, nulls, vars, dead, htid FROM bt_page_items('child.alarms_null_alarm_clear_time_idx', 1);\n itemoffset | ctid | itemlen | nulls | vars | dead | htid \n------------+-----------+---------+-------+------+------+---------\n...\n 77 | (29,9) | 16 | t | f | f | (29,9)\n 78 | (29,9) | 16 | t | f | f | (29,9)\n\nts=# SELECT lp, lp_off, lp_flags, lp_len, t_xmin, t_xmax, t_field3, t_ctid, t_infomask2, t_infomask, t_hoff, t_bits, t_oid FROM heap_page_items(get_raw_page('child.alarms_null', 29));\n lp | lp_off | lp_flags | lp_len | t_xmin | t_xmax | t_field3 | t_ctid | t_infomask2 | t_infomask | t_hoff | t_bits | t_oid \n----+--------+----------+--------+--------+----------+----------+---------+-------------+------------+--------+------------------------------------------+-------\n 1 | 6680 | 1 | 1512 | 88669 | 27455486 | 44 | (29,1) | 8225 | 10691 | 32 | 1100001111111111111111101111111110000000 | \n 2 | 6 | 2 | 0 | | | | | | | | | \n 3 | 5168 | 1 | 1512 | 87374 | 27455479 | 37 | (29,3) | 8225 | 10691 | 32 | 1100001111111111111111101111111110000000 | \n 4 | 4192 | 1 | 976 | 148104 | 27574887 | 0 | (29,4) | 8225 | 10695 | 32 | 1100001111111111111111101111111110000000 | \n 5 | 10 | 2 | 0 | | | | | | | | | \n 6 | 3216 | 1 | 976 | 148137 | 27574888 | 0 | (29,6) | 40993 | 10695 | 32 | 1100001111111111111111101111111110000000 | \n 7 | 8 | 2 | 0 | | | | | | | | | \n 8 | 2240 | 1 | 976 | 47388 | 27574858 | 7 | (29,8) | 40993 | 10695 | 32 | 1100001111111111111111101111111110000000 | \n 9 | 0 | 3 | 0 | | | | | | | | | 
\n 10 | 1264 | 1 | 976 | 148935 | 27574889 | 0 | (29,10) | 40993 | 10695 | 32 | 1100001111111111111111101111111110000000 | \n 11 | 0 | 3 | 0 | | | | | | | | | \n 12 | 0 | 3 | 0 | | | | | | | | | \n(12 rows)\n\n(gdb) fr 4\n#4 0x0000000000509a14 in _bt_insertonpg (rel=rel@entry=0x7f6dfd3cd628, itup_key=itup_key@entry=0x2011b40, buf=15, cbuf=cbuf@entry=0, stack=stack@entry=0x2011bd8, itup=0x2011c00, itup@entry=0x200d608, itemsz=16, \n newitemoff=2, postingoff=62, split_only_page=split_only_page@entry=false) at nbtinsert.c:1174\n1174 in nbtinsert.c\n(gdb) p page\n$5 = 0x7f6de58e0e00 \"\\200\"\n(gdb) dump binary memory /tmp/dump_block.page page (page + 8192)\n\nts=# SELECT lp, lp_off, lp_flags, lp_len, t_xmin, t_xmax, t_field3, t_ctid, t_infomask2, t_infomask, t_hoff, t_bits, t_oid FROM heap_page_items(pg_read_binary_file('/tmp/dump_block.page')) WHERE t_xmin IS NOT NULL;\n lp | lp_off | lp_flags | lp_len | t_xmin | t_xmax | t_field3 | t_ctid | t_infomask2 | t_infomask | t_hoff | t_bits | t_oid \n-----+--------+----------+--------+---------+------------+----------+--------+-------------+------------+--------+--------+-------\n 1 | 8152 | 1 | 24 | 1048576 | 2685931521 | 0 | (0,0) | 0 | 120 | 1 | | \n 2 | 7288 | 1 | 864 | 1048576 | 2740985997 | 0 | (0,0) | 0 | 1 | 0 | | \n 67 | 6368 | 1 | 920 | 1048576 | 2744656022 | 0 | (0,0) | 33 | 4 | 0 | | \n 137 | 5056 | 1 | 1312 | 1048576 | 2770346200 | 0 | (0,0) | 69 | 6 | 0 | | \n 142 | 4608 | 1 | 448 | 1048576 | 2713722952 | 0 | (0,0) | 107 | 4 | 0 | | \n(5 rows)\n\nI didn't change the kernel here, nor on the previous bug report - it was going\nto be my \"next step\", until I found the stuck autovacuum, and I mentioned it\nfor context, but probably just confused things.\n\n-- \nJustin\n\n\n", "msg_date": "Sun, 27 Jun 2021 17:34:56 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": true, "msg_subject": "Re: pg14b2: FailedAssertion(\"_bt_posting_valid(nposting)\", File:\n \"nbtdedup.c\", ..." 
}, { "msg_contents": "I've just realized that this VM has a strange storage configuration.\n\nIt's using LVM thin pools, which I don't use anywhere else.\nSomeone else set this up, and I think I've literally never used pools before.\nSome time ago, the pool ran out of space, and I ran LVM repair on it.\nIt seems very possible that's the issue.\nA latent problem might've been tickled by pg_upgrade --link.\n\nThat said, the relevant table is the active \"alarms\" table, and it would've\ngotten plenty of DML with no issue for months running v13.\n\nFeel free to dismiss this report if it seems dubious.\n\n-- \nJustin\n\n\n", "msg_date": "Sun, 27 Jun 2021 20:47:51 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": true, "msg_subject": "Re: pg14b2: FailedAssertion(\"_bt_posting_valid(nposting)\", File:\n \"nbtdedup.c\", ..." }, { "msg_contents": "On Sun, Jun 27, 2021 at 6:47 PM Justin Pryzby <pryzby@telsasoft.com> wrote:\n> I've just realized that this VM has a strange storage configuration.\n>\n> It's using LVM thin pools, which I don't use anywhere else.\n> Someone else set this up, and I think I've literally never used pools before.\n> Some time ago, the pool ran out of space, and I ran LVM repair on it.\n> It seems very possible that's the issue.\n> A latent problem might've been tickled by pg_upgrade --link.\n\nAnything is possible, of course, but even if this is a bug in Postgres\nit isn't particularly likely to be a bug in the nbtree code. We see\nclear signs of general corruption here, which is apparently not\nlimited to the one page that you supplied to me privately -- since\nAFAICT that's not the page that amcheck throws the error on.\n\n> That said, the relevant table is the active \"alarms\" table, and it would've\n> gotten plenty of DML with no issue for months running v13.\n\nIt might not have been visibly broken without assertions enabled,\nthough. 
I sprinkled nbtdedup.c with these _bt_posting_valid()\nassertions just because it was easy. The assertions were bound to\ncatch some problem sooner or later, and had acceptable overhead.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Sun, 27 Jun 2021 23:08:42 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: pg14b2: FailedAssertion(\"_bt_posting_valid(nposting)\", File:\n \"nbtdedup.c\", ..." }, { "msg_contents": "On Sun, Jun 27, 2021 at 11:08 PM Peter Geoghegan <pg@bowt.ie> wrote:\n> > That said, the relevant table is the active \"alarms\" table, and it would've\n> > gotten plenty of DML with no issue for months running v13.\n>\n> It might not have been visibly broken without assertions enabled,\n> though. I sprinkled nbtdedup.c with these _bt_posting_valid()\n> assertions just because it was easy. The assertions were bound to\n> catch some problem sooner or later, and had acceptable overhead.\n\nObviously nothing stops you from running amcheck on the original\ndatabase that you're running in production. You won't need to have\nenabled assertions to catch the same problem that way. This seems like\nthe best way to isolate the problem. I strongly suspect that it's the\nLVM issue for my own reasons: nothing changed during the Postgres 14\ncycle that seems truly related.\n\nThe index deletion stuff (commit d168b666823) might seem like an\nobvious possible culprit, but I consider it unlikely. I added many\ndefensive assertions to that code too: _bt_bottomupdel_pass() also\nuses exactly the same kind of _bt_posting_valid() assertions directly.\nPlus _bt_delitems_delete_check() is highly defensive with assertions\nwhen it processes a posting list tuple. 
If there was a problem with\nany of that code it seems very likely that those assertions would have\nfailed first.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Mon, 28 Jun 2021 13:42:25 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: pg14b2: FailedAssertion(\"_bt_posting_valid(nposting)\", File:\n \"nbtdedup.c\", ..." }, { "msg_contents": "On Mon, Jun 28, 2021 at 01:42:25PM -0700, Peter Geoghegan wrote:\n> On Sun, Jun 27, 2021 at 11:08 PM Peter Geoghegan <pg@bowt.ie> wrote:\n> > > That said, the relevant table is the active \"alarms\" table, and it would've\n> > > gotten plenty of DML with no issue for months running v13.\n> >\n> > It might not have been visibly broken without assertions enabled,\n> > though. I sprinkled nbtdedup.c with these _bt_posting_valid()\n> > assertions just because it was easy. The assertions were bound to\n> > catch some problem sooner or later, and had acceptable overhead.\n> \n> Obviously nothing stops you from running amcheck on the original\n> database that you're running in production. You won't need to have\n> enabled assertions to catch the same problem that way. This seems like\n> the best way to isolate the problem. I strongly suspect that it's the\n> LVM issue for my own reasons: nothing changed during the Postgres 14\n> cycle that seems truly related.\n\nSorry, but I didn't save the pre-upgrade cluster (just pg_dump).\n\nFor now, I moved the table out of the way and re-created it.\nI could send you the whole relnode if you wanted to look more..\nIt seemed like almost any insert on the table caused it to crash.\n\nBTW, on a copy of the v14 cluster, both vacuum and reindex also resolved the\nissue (at least enough to avoid the crash).\n\n-- \nJustin\n\n\n", "msg_date": "Mon, 28 Jun 2021 18:26:56 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": true, "msg_subject": "Re: pg14b2: FailedAssertion(\"_bt_posting_valid(nposting)\", File:\n \"nbtdedup.c\", ..." } ]
[ { "msg_contents": "I found a couple of places where numeric multiplication suffers from\noverflow errors for inputs that aren't necessarily very large in\nmagnitude.\n\nThe first is with the numeric * operator, which attempts to always\nproduce the exact result, even though the numeric type has a maximum\nof 16383 digits after the decimal point. If the limit is exceeded an\noverflow error is produced, e.g.:\n\nSELECT (1+2e-10000) * (1+3e-10000);\nERROR: value overflows numeric format\n\nI can't imagine anyone actually wanting that many digits after the\ndecimal point, but it can happen with a sequence of multiplications,\nwhere the number of digits after the decimal point grows with each\nmultiply. Throwing an error is not particularly useful, and that error\nmessage is quite misleading, since the result is not very large.\nTherefore I propose to make this round the result to 16383 digits if\nit exceeds that, as in the first attached patch.\n\nIt's worth noting that to get correct rounding, it's necessary to\ncompute the full exact product (which we're actually already doing)\nbefore rounding, as opposed to passing rscale=16383 to mul_var(), and\nletting it round. The latter approach would compute a truncated\nproduct with MUL_GUARD_DIGITS extra digits of precision, which doesn't\nnecessarily round the final digit the right way. The test case in the\npatch is an example that would round the wrong way with a truncated\nproduct.\n\nI considered doing the final rounding in mul_var() (after computing\nthe full product), to prevent such overflows for all callers, but I\nthink that's the wrong approach because it risks covering up other\nproblems, such as the following:\n\nWhile looking through the remaining numeric code, I found one other\nplace that has a similar problem -- when calculating the sum of\nsquares for aggregates like variance() and stddev(), the squares can\nend up with more than 16383 digits after the decimal point. 
When the\nquery is running on a single backend, that's no problem, because the\nfinal result is rounded to 1000 digits. However, if it uses parallel\nworkers, the result from each worker is sent using numeric_send/recv()\nwhich attempts to convert to numeric before sending. Thus it's\npossible to have an aggregate query that fails if it uses parallel\nworkers and succeeds otherwise.\n\nIn this case, I don't think that rounding the result from each worker\nis the right approach, since that can lead to the final result being\ndifferent depending on whether or not the query uses parallel workers.\nAlso, given that each worker is already doing the hard work of\ncomputing these squares, it seems a waste to just discard that\ninformation.\n\nSo the second patch fixes this by adding new numericvar_send/recv()\nfunctions capable of sending the full precision NumericVar's from each\nworker, without rounding. The first test case in this patch is an\nexample that would round the wrong way if the result from each worker\nwere rounded before being sent.\n\nAn additional benefit to this approach is that it also addresses the\nissue noted in the old code about its use of numeric_send/recv() being\nwasteful:\n\n /*\n * This is a little wasteful since make_result converts the NumericVar\n * into a Numeric and numeric_send converts it back again. Is it worth\n * splitting the tasks in numeric_send into separate functions to stop\n * this? Doing so would also remove the fmgr call overhead.\n */\n\nSo the patch converts all aggregate serialization/deserialization code\nto use the new numericvar_send/recv() functions. 
I doubt that that\ngives much in the way of a performance improvement, but it makes the\ncode a little neater as well as preventing overflows.\n\nAfter writing that, I realised that there's another overflow risk --\nif the input values are very large in magnitude, the sum of squares\ncould genuinely overflow the numeric type, while the final variance\ncould be quite small (and that can't be fixed by rounding). So this\ntoo fails when parallel workers are used, and succeeds otherwise, and\nthe patch fixes this too, so I added a separate test case for it.\n\nRegards,\nDean", "msg_date": "Mon, 28 Jun 2021 10:16:22 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Numeric multiplication overflow errors" }, { "msg_contents": "On Mon, 28 Jun 2021 at 21:16, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> So the second patch fixes this by adding new numericvar_send/recv()\n> functions capable of sending the full precision NumericVar's from each\n> worker, without rounding. The first test case in this patch is an\n> example that would round the wrong way if the result from each worker\n> were rounded before being sent.\n\nInstead of adding a send/recv function, unless I'm mistaken, it should\nbe possible to go the whole hog and optimizing this further by instead\nof having numericvar_send(), add:\n\nstatic void numericvar_serialize(StringInfo buf, const NumericVar *var)\n{\n /* guts of numericvar_send() here */\n}\n\nand then rename numericvar_recv to numericvar_deserialize.\n\nThat should allow the complexity to be reduced a bit further as it'll\nallow you to just serialize the NumericVar into the existing buffer\nrather than having the send function create a new one only to have the\ncaller copy it back out again into another buffer. 
It also allows you\nto get rid of the sumX and sumX2 vars from the serialize functions.\n\n> An additional benefit to this approach is that it also addresses the\n> issue noted in the old code about its use of numeric_send/recv() being\n> wasteful:\n>\n> /*\n> * This is a little wasteful since make_result converts the NumericVar\n> * into a Numeric and numeric_send converts it back again. Is it worth\n> * splitting the tasks in numeric_send into separate functions to stop\n> * this? Doing so would also remove the fmgr call overhead.\n> */\n>\n> So the patch converts all aggregate serialization/deserialization code\n> to use the new numericvar_send/recv() functions. I doubt that that\n> gives much in the way of a performance improvement, but it makes the\n> code a little neater as well as preventing overflows.\n\nI did mean to come back to that comment one day, so thanks for doing\nthis and for finding/fixing the overflow bugs.\n\nIt's unfortunate that we're up against Amdahl's law here. The best\ncases to parallelise have fewer groups, so we\nserialise/combine/deserialise fewer groups in the best cases meaning\nthe optimisation done here are not being executed as much as the\nnot-so-good case. 
So yeah, I agree that it might be difficult to\nmeasure.\n\nDavid\n\n\n", "msg_date": "Mon, 28 Jun 2021 23:26:50 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "Thanks for looking!\n\nOn Mon, 28 Jun 2021 at 12:27, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> Instead of adding a send/recv function, unless I'm mistaken, it should\n> be possible to go the whole hog and optimizing this further by instead\n> of having numericvar_send(), add:\n>\n> static void numericvar_serialize(StringInfo buf, const NumericVar *var)\n> {\n> /* guts of numericvar_send() here */\n> }\n>\n> and then rename numericvar_recv to numericvar_deserialize.\n>\n> That should allow the complexity to be reduced a bit further as it'll\n> allow you to just serialize the NumericVar into the existing buffer\n> rather than having the send function create a new one only to have the\n> caller copy it back out again into another buffer. It also allows you\n> to get rid of the sumX and sumX2 vars from the serialize functions.\n\nYes, agreed. That simplifies the code nicely as well as saving a buffer copy.\n\nI'm not a fan of the *serialize() function names in numeric.c though\n(e.g., the name numeric_serialize() seems quite misleading for what it\nactually does). So rather than adding to those, I've kept the original\nnames. In the context where they're used, those names seem more\nnatural.\n\nRegards,\nDean", "msg_date": "Tue, 29 Jun 2021 11:29:01 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "Thanks for the updated patch\n\nOn Tue, 29 Jun 2021 at 22:29, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> I'm not a fan of the *serialize() function names in numeric.c though\n> (e.g., the name numeric_serialize() seems quite misleading for what it\n> actually does). 
So rather than adding to those, I've kept the original\n> names. In the context where they're used, those names seem more\n> natural.\n\nI've only looked at the numeric-agg-sumX2-overflow-v2.patch one and it\nall looks mostly ok.\n\nI kinda disagree with the send/recv naming since all you're using them\nfor is to serialise/deserialise the NumericVar. Functions named\n*send() and recv() I more expect to return a bytea so they can be used\nfor a type's send/recv function. I just don't have the same\nexpectations for functions named serialize/deserialize. That's all\npretty special to aggregates with internal states.\n\nOne other thing I can think of to mention is that the recv function\nfetches the digits with pq_getmsgint(buf, sizeof(NumericDigit))\nwhereas, the send function sends them with pq_sendint16(buf,\nvar->digits[i]). I understand you've just copied numeric_send/recv,\nbut I disagree with that too and think that both send functions should\nbe using pq_sendint. This would save having weird issues if someone\nwas to change the type of the NumericDigit. Perhaps that would cause\nother problems, but I don't think it's a good idea to bake those\nproblems in any further.\n\nI also wonder if numericvar_recv() really needs all the validation\ncode? We don't do any other validation during deserialisation. I see\nthe logic in doing this for a recv function since a client could send\nus corrupt data e.g. during a binary copy. There are currently no\nexternal factors to account for with serial/deserial.\n\nI'm also fine for that patch to go in as-is. I'm just pointing out\nwhat I noted down when looking over it. 
I'll let you choose if you\nwant to make any changes based on the above.\n\nDavid\n\n\n", "msg_date": "Thu, 1 Jul 2021 13:00:22 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": " patchOn Thu, 1 Jul 2021 at 13:00, David Rowley <dgrowleyml@gmail.com> wrote:\n> I've only looked at the numeric-agg-sumX2-overflow-v2.patch one and it\n> all looks mostly ok.\n\nI forgot to mention earlier, after looking at the code a bit more I\nwondered if we should just serialise the NumericSumAccum instead of\nthe NumericVar. If we're changing this, then maybe it's worth just\ndoing it once and making it as optimal as possible.\n\nRight now we translate the NumericSumAccum to a NumericVar in the\nserial function, only to translate it back again in the deserial\nfunction. This is a bit of a waste of CPU effort, although it might be\nfewer bytes to copy over to the main process. Since deserial can be\npretty hot if we've got a lot of workers throwing serialised aggregate\nstates at the main process as fast as they all can go. Reducing the\noverheads in the serial part of the query could provide some big wins.\n\nI played about with the following case:\n\ncreate table num (a int not null, b numeric not null, c numeric not\nnull, d numeric not null, e numeric not null, f numeric not null, g\nnumeric not null, h numeric not null);\ninsert into num select x,y,y,y,y,y,y,y from generate_series(1,1000000)\nx, generate_Series(1000000000,1000000099) y order by x;\nexplain analyze select\na,sum(b),sum(c),sum(d),sum(e),sum(f),sum(g),sum(h) from num group by\na;\n\nTo try and load up the main process as much as possible, I set 128\nworkers to run. The profile of the main process looked like:\n\nMaster:\n 14.10% postgres [.] AllocSetAlloc\n 7.03% postgres [.] accum_sum_carry.part.0\n 4.87% postgres [.] ExecInterpExpr\n 3.72% postgres [.] numeric_sum\n 3.52% postgres [.] 
accum_sum_copy\n 3.06% postgres [.] pq_getmsgint\n 2.95% postgres [.] palloc\n 2.82% postgres [.] ExecStoreMinimalTuple\n 2.62% postgres [.] accum_sum_add\n 2.60% postgres [.] make_result_opt_error\n 2.58% postgres [.] numeric_avg_deserialize\n 2.21% [kernel] [k] copy_user_generic_string\n 2.08% postgres [.] tuplehash_insert_hash_internal\n\nSo it is possible to get this stuff to show up.\n\nYour numeric-agg-sumX2-overflow-v2.patch patch speeds this up quite a\nbit already, so it makes me think it might be worth doing the extra\nwork to further reduce the overhead.\n\nMaster @ 3788c6678\n\nExecution Time: 8306.319 ms\nExecution Time: 8407.785 ms\nExecution Time: 8491.056 ms\n\nMaster + numeric-agg-sumX2-overflow-v2.patch\nExecution Time: 6633.278 ms\nExecution Time: 6657.350 ms\nExecution Time: 6568.184 ms\n\nA possible reason we might not want to do this is that we currently\ndon't have a NumericSumAccum for some functions when the compiler has\na working int128 type. At the moment we translate the int128\naccumulator into a NumericVar. We could just serialize the int128 type\nin those cases. It would just mean the serialised format is not as\nconsistent between different builds. We currently have nothing that\ndepends on them matching across different machines.\n\nDo you think it's worth doing this?\n\nDavid\n\n\n", "msg_date": "Thu, 1 Jul 2021 17:42:50 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Thu, 1 Jul 2021 at 02:00, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> I kinda disagree with the send/recv naming since all you're using them\n> for is to serialise/deserialise the NumericVar. Functions named\n> *send() and recv() I more expect to return a bytea so they can be used\n> for a type's send/recv function. I just don't have the same\n> expectations for functions named serialize/deserialize. 
That's all\n> pretty special to aggregates with internal states.\n\nOK, on further reflection, I think it's best not to use the send/recv\nnames because those names suggest that these are the internal\nimplementations of the numeric_send/recv() functions, which they're\nnot.\n\n> One other thing I can think of to mention is that the recv function\n> fetches the digits with pq_getmsgint(buf, sizeof(NumericDigit))\n> whereas, the send function sends them with pq_sendint16(buf,\n> var->digits[i]). I understand you've just copied numeric_send/recv,\n> but I disagree with that too and think that both send functions should\n> be using pq_sendint.\n\nI have to disagree with that. pq_sendint() is marked as deprecated and\nalmost all callers of it have been removed. It looks like the original\nmotivation for that was performance (see 1de09ad8eb), but I prefer it\nthat way because it makes changing the binary format for sending data\nmore of a conscious choice.\n\nThat implies we should use pq_getmsgint(buf, sizeof(int16)) to read\nNumericDigit's, which I've done in numericvar_deserialize(), but I've\nleft numeric_recv() as it is -- these 2 functions have already\ndiverged now, and this patch is meant to be about fixing overflow\nerrors, so I don't want to add more scope-creep. Perhaps a follow-on\npatch could introduce pq_getmsgint8/16/32() functions, deprecate\npq_getmsgint(), and convert callers to use the new functions.\n\n> I also wonder if numericvar_recv() really needs all the validation\n> code? We don't do any other validation during deserialisation. I see\n> the logic in doing this for a recv function since a client could send\n> us corrupt data e.g. during a binary copy. There are currently no\n> external factors to account for with serial/deserial.\n\nOK, fair enough. That makes it more compact and efficient.\n\nI'll post an update in a while. 
Thanks for the review.\n\nRegards,\nDean\n\n\n", "msg_date": "Thu, 1 Jul 2021 10:27:00 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Thu, 1 Jul 2021 at 10:27, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n>\n> I'll post an update in a while. Thanks for the review.\n>\n\nOne other thing I'm wondering about is back-patching. I was originally\nthinking of these as back-patchable bug fixes, but changing the binary\nformat of the aggregate serialization states feels dodgy for a\nback-patch.\n\nBut then, these functions are only callable in an aggregate context,\nand work in pairs. It could be a problem if someone were using them to\npass state between different servers, but I see no evidence of them\nbeing used in that way.\n\nFor reference, this will affect the following:\n - int8_avg_serialize()\n - numeric_avg_serialize()\n - numeric_poly_serialize()\n - numeric_serialize()\n\nand the corresponding *_deserialize functions.\n\nRegards,\nDean\n\n\n", "msg_date": "Thu, 1 Jul 2021 11:02:51 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Thu, 1 Jul 2021 at 22:03, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> One other thing I'm wondering about is back-patching. I was originally\n> thinking of these as back-patchable bug fixes, but changing the binary\n> format of the aggregate serialization states feels dodgy for a\n> back-patch.\n\nI was wondering about that too. I'm not sure if any extensions might\nbe using serial/deserial functions to communicate over multiple\nservers. As far as I know, Citus does not do this and implements\naggregates like AVG(c) over multi-nodes with SUM(c) + COUNT(c). 
I'm\npretty sure Citus is not the only extension doing that kind of work.\nSo perhaps other people are using the serial/deserial functions.\n\nDavid\n\n\n", "msg_date": "Thu, 1 Jul 2021 22:44:46 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Thu, 1 Jul 2021 at 06:43, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> Master @ 3788c6678\n>\n> Execution Time: 8306.319 ms\n> Execution Time: 8407.785 ms\n> Execution Time: 8491.056 ms\n>\n> Master + numeric-agg-sumX2-overflow-v2.patch\n> Execution Time: 6633.278 ms\n> Execution Time: 6657.350 ms\n> Execution Time: 6568.184 ms\n>\n\nHmm, I'm a bit surprised by those numbers. I wouldn't have expected it\nto be spending enough time in the serialization/deserialization code\nfor it to make such a difference. I was only able to measure a 2-3%\nperformance improvement with the same test, and that was barely above\nthe noise.\n\n> A possible reason we might not want to do this is that we currently\n> don't have a NumericSumAccum for some functions when the compiler has\n> a working int128 type. At the moment we translate the int128\n> accumulator into a NumericVar. We could just serialize the int128 type\n> in those cases. It would just mean the serialised format is not as\n> consistent between different builds. We currently have nothing that\n> depends on them matching across different machines.\n>\n> Do you think it's worth doing this?\n>\n\nI think it's probably not worth doing this. 
I remain sceptical that it\ncould give that much of a performance gain, and keeping the\nplatform-independent state might well be useful in the future.\n\nRegards,\nDean\n\n\n", "msg_date": "Thu, 1 Jul 2021 13:28:11 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Fri, 2 Jul 2021 at 00:28, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n>\n> On Thu, 1 Jul 2021 at 06:43, David Rowley <dgrowleyml@gmail.com> wrote:\n> >\n> > Master @ 3788c6678\n> >\n> > Execution Time: 8306.319 ms\n> > Execution Time: 8407.785 ms\n> > Execution Time: 8491.056 ms\n> >\n> > Master + numeric-agg-sumX2-overflow-v2.patch\n> > Execution Time: 6633.278 ms\n> > Execution Time: 6657.350 ms\n> > Execution Time: 6568.184 ms\n> >\n>\n> Hmm, I'm a bit surprised by those numbers. I wouldn't have expected it\n> to be spending enough time in the serialization/deserialization code\n> for it to make such a difference. I was only able to measure a 2-3%\n> performance improvement with the same test, and that was barely above\n> the noise.\n\nI ran this again with a few different worker counts after tuning a few\nmemory settings so there was no spilling to disk and so everything was\nin RAM. Mostly so I could get consistent results.\n\nHere's the results. Average over 3 runs on each:\n\nWorkers Master Patched Percent\n8 11094.1 11084.9 100.08%\n16 8711.4 8562.6 101.74%\n32 6961.4 6726.3 103.50%\n64 6137.4 5854.8 104.83%\n128 6090.3 5747.4 105.96%\n\nSo the gains are much less at lower worker counts. 
I think this is\nbecause most of the gains are in the serial part of the plan and with\nhigher worker counts that part of the plan is relatively much bigger.\n\nSo likely performance isn't too critical here, but it is something to\nkeep in mind.\n\nDavid\n\n\n", "msg_date": "Fri, 2 Jul 2021 21:23:52 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Fri, 2 Jul 2021 at 10:24, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> I ran this again with a few different worker counts after tuning a few\n> memory settings so there was no spilling to disk and so everything was\n> in RAM. Mostly so I could get consistent results.\n>\n> Here's the results. Average over 3 runs on each:\n>\n> Workers Master Patched Percent\n> 8 11094.1 11084.9 100.08%\n> 16 8711.4 8562.6 101.74%\n> 32 6961.4 6726.3 103.50%\n> 64 6137.4 5854.8 104.83%\n> 128 6090.3 5747.4 105.96%\n>\n\nThanks for testing again. Those are nice looking results, and are much\nmore in line with what I was seeing.\n\n> So the gains are much less at lower worker counts. I think this is\n> because most of the gains are in the serial part of the plan and with\n> higher worker counts that part of the plan is relatively much bigger.\n>\n> So likely performance isn't too critical here, but it is something to\n> keep in mind.\n>\n\nYes, agreed. I suspect there's not much more that can be shaved off\nthis particular piece of code now though. 
Here's an update with the\nlast set of changes discussed.\n\nRegards,\nDean", "msg_date": "Fri, 2 Jul 2021 11:55:47 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Fri, 2 Jul 2021 at 22:55, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> Here's an update with the\n> last set of changes discussed.\n\nLooks good to me.\n\nJust the question of if we have any problems changing the serialized\nformat in the back branches. I'm not sure if that's something we've\ndone before. I only had a quick look of git blame in the\nserial/deserial functions and the only changes I really see apart from\na few cosmetic ones were a57d312a7 and 9cca11c91. Both of which just\nwent into master.\n\nDavid\n\n\n", "msg_date": "Fri, 2 Jul 2021 23:56:06 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Fri, 2 Jul 2021 at 12:56, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Fri, 2 Jul 2021 at 22:55, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> > Here's an update with the\n> > last set of changes discussed.\n>\n> Looks good to me.\n\nThanks for the review and testing!\n\n> Just the question of if we have any problems changing the serialized\n> format in the back branches. I'm not sure if that's something we've\n> done before. I only had a quick look of git blame in the\n> serial/deserial functions and the only changes I really see apart from\n> a few cosmetic ones were a57d312a7 and 9cca11c91. Both of which just\n> went into master.\n\nThinking about this more, I think it's best not to risk back-patching.\nIt *might* be safe, but it's difficult to really be sure of that. The\nbug itself is pretty unlikely to ever happen in practice, hence the\nlack of prior complaints, and in fact I only found it by an\nexamination of the code. 
So it doesn't seem to be worth the risk.\n\nOTOH, the original bug, with numeric *, is one I have hit in practice,\nand the fix is trivial and low risk, so I would like to backpatch that\nfix.\n\nRegards,\nDean\n\n\n", "msg_date": "Sat, 3 Jul 2021 00:04:12 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Sat, 3 Jul 2021 at 11:04, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> Thinking about this more, I think it's best not to risk back-patching.\n> It *might* be safe, but it's difficult to really be sure of that. The\n> bug itself is pretty unlikely to ever happen in practice, hence the\n> lack of prior complaints, and in fact I only found it by an\n> examination of the code. So it doesn't seem to be worth the risk.\n\nThat seems like good logic to me. Perhaps we can reconsider that\ndecision if users complain about it.\n\nDavid\n\n\n", "msg_date": "Sun, 4 Jul 2021 20:42:48 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Sun, 4 Jul 2021 at 09:43, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Sat, 3 Jul 2021 at 11:04, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> > Thinking about this more, I think it's best not to risk back-patching.\n> > It *might* be safe, but it's difficult to really be sure of that. The\n> > bug itself is pretty unlikely to ever happen in practice, hence the\n> > lack of prior complaints, and in fact I only found it by an\n> > examination of the code. So it doesn't seem to be worth the risk.\n>\n> That seems like good logic to me. Perhaps we can reconsider that\n> decision if users complain about it.\n\nThanks. 
Pushed to master only.\n\nI think the other part (avoiding overflows in numeric_mul) is fairly\nstraightforward and uncontentious, so barring objections, I'll push\nand back-patch it in a couple of days or so.\n\nRegards,\nDean", "msg_date": "Mon, 5 Jul 2021 11:04:29 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" } ]
[ { "msg_contents": "There are certain parts of code that laboriously initialize every field \nof a struct to (some spelling of) zero, even though the whole struct was \njust zeroed (by makeNode() or memset()) a few lines earlier. Besides \nbeing redundant, I find this hard to read in some situations because \nit's then very hard to tell what is different between different cases or \nbranches. The attached patch cleans up most of that. I left alone \ninstances where there are (nontrivial) comments attached to the \ninitializations or where there appeared to be some value in maintaining \nsymmetry. But a lot of it was just plain useless code, some clearly \ncopy-and-pasted repeatedly.\n\nNote \n<https://www.postgresql.org/message-id/flat/4c9f01be-9245-2148-b569-61a8562ef190@2ndquadrant.com> \nwhere we had a previous discussion about trimming down useless \ninitializations to zero.", "msg_date": "Mon, 28 Jun 2021 11:59:29 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Remove redundant initializations" }, { "msg_contents": "On Mon, 28 Jun 2021 at 21:59, Peter Eisentraut\n<peter.eisentraut@enterprisedb.com> wrote:\n>\n> There are certain parts of code that laboriously initialize every field\n> of a struct to (some spelling of) zero, even though the whole struct was\n> just zeroed (by makeNode() or memset()) a few lines earlier. Besides\n> being redundant, I find this hard to read in some situations because\n> it's then very hard to tell what is different between different cases or\n> branches.\n\nJust for information, there was a similar proposal in [1]. There were\nsome arguments for and against the idea. 
Might be worth reviewing.\n\nDavid\n\n[1] https://www.postgresql.org/message-id/flat/CAFjFpRdmx2oWdCrYcQuk9CZ7S9iTrKSziC%3D%3D6j0Agw4jdmvLng%40mail.gmail.com#ff36253217d67d5531f5b2017a0dbfd0\n\n\n", "msg_date": "Mon, 28 Jun 2021 23:40:02 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Remove redundant initializations" }, { "msg_contents": "> On 28 Jun 2021, at 11:59, Peter Eisentraut <peter.eisentraut@enterprisedb.com> wrote:\n> \n> There are certain parts of code that laboriously initialize every field of a struct to (some spelling of) zero, even though the whole struct was just zeroed (by makeNode() or memset()) a few lines earlier. Besides being redundant, I find this hard to read in some situations because it's then very hard to tell what is different between different cases or branches. The attached patch cleans up most of that. I left alone instances where there are (nontrivial) comments attached to the initializations or where there appeared to be some value in maintaining symmetry. 
But a lot of it was just plain useless code, some clearly copy-and-pasted repeatedly.\n\nI personally sort of like the initializations of Lists like the one below, even\nif redundant, since they then clearly stand out as being Lists.\n\n-\tfk_trigger->args = NIL;\n\nJust a matter of personal preference, but I find that those aid readability.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 13:57:24 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Remove redundant initializations" }, { "msg_contents": "Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n> There are certain parts of code that laboriously initialize every field \n> of a struct to (some spelling of) zero, even though the whole struct was \n> just zeroed (by makeNode() or memset()) a few lines earlier.\n\nFWIW, I think that it's an intentional style choice to explicitly\ninitialize every field rather than relying on makeNode to have done so.\n\nThe primary case where I personally rely on that style is when adding a\nnew field to a struct. Currently it's possible to grep for some existing\nfield and add the new one beside it. Leaving out initializations by\nrelying on side-effects of makeNode makes that far riskier.\n\nA different aspect is the one you mention parenthetically, which is\nwhat values can we rely on to be all-zero-bits? 
Switching to this\nstyle will embed assumptions about that to a far greater degree than\nwe have now, making the code less robust against changes.\n\nI'm aware that there are opinions to the contrary, but I do not think\nthis is an improvement.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 10:15:23 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Remove redundant initializations" }, { "msg_contents": "On Tue, 29 Jun 2021 at 02:15, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> The primary case where I personally rely on that style is when adding a\n> new field to a struct. Currently it's possible to grep for some existing\n> field and add the new one beside it. Leaving out initializations by\n> relying on side-effects of makeNode makes that far riskier.\n\nFWIW, I mostly grep for makeNode(NameOfNode) as I'm a bit mistrusting\nof if the random existing field name that I pick to grep for will\nproperly showing me all the locations I should touch.\n\nDavid\n\n\n", "msg_date": "Thu, 1 Jul 2021 00:42:53 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Remove redundant initializations" }, { "msg_contents": "David Rowley <dgrowleyml@gmail.com> writes:\n> On Tue, 29 Jun 2021 at 02:15, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> The primary case where I personally rely on that style is when adding a\n>> new field to a struct. Currently it's possible to grep for some existing\n>> field and add the new one beside it. 
Leaving out initializations by\n>> relying on side-effects of makeNode makes that far riskier.\n\n> FWIW, I mostly grep for makeNode(NameOfNode) as I'm a bit mistrusting\n> of if the random existing field name that I pick to grep for will\n> properly showing me all the locations I should touch.\n\nI tend to do that too, but it's not a foolproof thing either, since\nsome places use random memset's for the purpose.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 09:28:17 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Remove redundant initializations" }, { "msg_contents": "On Mon, Jun 28, 2021 at 3:30 PM Peter Eisentraut\n<peter.eisentraut@enterprisedb.com> wrote:\n>\n> There are certain parts of code that laboriously initialize every field\n> of a struct to (some spelling of) zero, even though the whole struct was\n> just zeroed (by makeNode() or memset()) a few lines earlier. Besides\n> being redundant, I find this hard to read in some situations because\n> it's then very hard to tell what is different between different cases or\n> branches. The attached patch cleans up most of that. I left alone\n> instances where there are (nontrivial) comments attached to the\n> initializations or where there appeared to be some value in maintaining\n> symmetry. But a lot of it was just plain useless code, some clearly\n> copy-and-pasted repeatedly.\n>\n> Note\n> <https://www.postgresql.org/message-id/flat/4c9f01be-9245-2148-b569-61a8562ef190@2ndquadrant.com>\n> where we had a previous discussion about trimming down useless\n> initializations to zero.\n\nThe patch does not apply on Head anymore, could you rebase and post a\npatch. 
I'm changing the status to \"Waiting for Author\".\n\nRegards,\nVignesh\n\n\n", "msg_date": "Thu, 15 Jul 2021 17:33:17 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Remove redundant initializations" }, { "msg_contents": "I'm +1 for the $SUBJECT concept, mostly because I take longer to read code\nwhere immaterial zero-initialization lines are diluting the code. A quick\nscan of the patch content is promising. If there's a decision to move\nforward, I'm happy to review it more closely.\n\nOn Wed, Jun 30, 2021 at 09:28:17AM -0400, Tom Lane wrote:\n> David Rowley <dgrowleyml@gmail.com> writes:\n> > On Tue, 29 Jun 2021 at 02:15, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> The primary case where I personally rely on that style is when adding a\n> >> new field to a struct. Currently it's possible to grep for some existing\n> >> field and add the new one beside it. Leaving out initializations by\n> >> relying on side-effects of makeNode makes that far riskier.\n> \n> > FWIW, I mostly grep for makeNode(NameOfNode) as I'm a bit mistrusting\n> > of if the random existing field name that I pick to grep for will\n> > properly showing me all the locations I should touch.\n> \n> I tend to do that too, but it's not a foolproof thing either, since\n> some places use random memset's for the purpose.\n\nI checked the first five matches of \"git grep ' = T_'\" to get a sense of code\nsites that skip makeNode(). 
Just one of those five initializes every field:\n\nrecordDependencyOnSingleRelExpr() builds RangeTblEntry, subset of fields\nEventTriggerCommonSetup() builds EventTriggerData, full fields\nvalidateForeignKeyConstraint() builds TriggerData, subset of fields\nExecBSInsertTriggers() builds TriggerData, subset of fields [many similar examples in trigger.c]\nExecBuildProjectionInfo() builds ExprState, subset of fields\n\nHence, I find we're already too inconsistent about \"explicitly initialize\nevery field\" to recommend \"grep for some existing field\". (Two participants\nin the 2018 thread made similar observations[1][2].) grepping T_NameOfNode\nand then makeNode(NameOfNode) is more reliable today, and $SUBJECT will not\ndecrease its reliability.\n\n[1] https://postgr.es/m/20180830045736.p3mrugcq2j367a3l@alap3.anarazel.de\n[2] https://postgr.es/m/CA+TgmoYPw3Y8ZKofseTpVbb8avy7v7JbjmG6BMe7cC+eOd7qVA@mail.gmail.com\n\n\n", "msg_date": "Sun, 12 Sep 2021 18:26:33 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: Remove redundant initializations" }, { "msg_contents": "On Sun, Sep 12, 2021 at 06:26:33PM -0700, Noah Misch wrote:\n> I'm +1 for the $SUBJECT concept, mostly because I take longer to read code\n> where immaterial zero-initialization lines are diluting the code. A quick\n> scan of the patch content is promising. If there's a decision to move\n> forward, I'm happy to review it more closely.\n\nThis has been sitting in the CF app for three weeks waiting on author,\nso I have marked this entry as RwF for now:\nhttps://commitfest.postgresql.org/34/3229/\n--\nMichael", "msg_date": "Fri, 1 Oct 2021 16:17:09 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Remove redundant initializations" } ]
[ { "msg_contents": "Here's the diff from a pgindent run. The results look kosher to me - I\nhad to do a little surgery on queryjumble.h due to it having an unused\ntypedef.\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com", "msg_date": "Mon, 28 Jun 2021 08:29:10 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": true, "msg_subject": "pgindent run" }, { "msg_contents": "On Tue, 29 Jun 2021 at 00:29, Andrew Dunstan <andrew@dunslane.net> wrote:\n> Here's the diff from a pgindent run.\n\n--- a/src/backend/commands/policy.c\n+++ b/src/backend/commands/policy.c\n@@ -587,65 +587,65 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid\nclassid, Oid policy_id)\n /* If any roles remain, update the policy entry. */\n if (num_roles > 0)\n {\n- /* This is the array for the new tuple */\n- role_ids = construct_array(role_oids, num_roles, OIDOID,\n- sizeof(Oid), true, TYPALIGN_INT);\n+ /* This is the array for the new tuple */\n+ role_ids = construct_array(role_oids, num_roles, OIDOID,\n+ sizeof(Oid), true, TYPALIGN_INT);\n\nI wasn't too sure about the status of this one. Michael did mention it\nin [1], but Tom mentioned that was on purpose to ease backpatching.\nI'm not too clear on if Tom intended it should stay unindented until\n\"rewriting that whole function in a little bit\".\n\nDavid\n\n[1] https://www.postgresql.org/message-id/YM0puvBnbBIZxJt2@paquier.xyz\n\n\n", "msg_date": "Tue, 29 Jun 2021 00:52:16 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: pgindent run" }, { "msg_contents": "\nOn 6/28/21 8:52 AM, David Rowley wrote:\n> On Tue, 29 Jun 2021 at 00:29, Andrew Dunstan <andrew@dunslane.net> wrote:\n>> Here's the diff from a pgindent run.\n> --- a/src/backend/commands/policy.c\n> +++ b/src/backend/commands/policy.c\n> @@ -587,65 +587,65 @@ RemoveRoleFromObjectPolicy(Oid roleid, Oid\n> classid, Oid policy_id)\n> /* If any roles remain, update the policy entry. 
*/\n> if (num_roles > 0)\n> {\n> - /* This is the array for the new tuple */\n> - role_ids = construct_array(role_oids, num_roles, OIDOID,\n> - sizeof(Oid), true, TYPALIGN_INT);\n> + /* This is the array for the new tuple */\n> + role_ids = construct_array(role_oids, num_roles, OIDOID,\n> + sizeof(Oid), true, TYPALIGN_INT);\n>\n> I wasn't too sure about the status of this one. Michael did mention it\n> in [1], but Tom mentioned that was on purpose to ease backpatching.\n> I'm not too clear on if Tom intended it should stay unindented until\n> \"rewriting that whole function in a little bit\".\n>\n> David\n>\n> [1] https://www.postgresql.org/message-id/YM0puvBnbBIZxJt2@paquier.xyz\n\n\n\nI'll let Tom speak for himself, but I somewhat doubt he meant the code\nto stay badly indented for more than a short period of time.\nUnfortunately, while pgindent has code that allows protecting comments\nfrom being formatted, it doesn't have a similar mechanism for code AFAICT.\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 09:25:34 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": true, "msg_subject": "Re: pgindent run" }, { "msg_contents": "On 6/28/21 8:29 AM, Andrew Dunstan wrote:\n> Here's the diff from a pgindent run. The results look kosher to me - I\n> had to do a little surgery on queryjumble.h due to it having an unused\n> typedef.\n>\n>\n\n\nThis time run against the right branch ..\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com", "msg_date": "Mon, 28 Jun 2021 10:13:08 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": true, "msg_subject": "Re: pgindent run" }, { "msg_contents": "Andrew Dunstan <andrew@dunslane.net> writes:\n> On 6/28/21 8:52 AM, David Rowley wrote:\n>> I wasn't too sure about the status of this one. 
Michael did mention it\n>> in [1], but Tom mentioned that was on purpose to ease backpatching.\n>> I'm not too clear on if Tom intended it should stay unindented until\n>> \"rewriting that whole function in a little bit\".\n\n> I'll let Tom speak for himself, but I somewhat doubt he meant the code\n> to stay badly indented for more than a short period of time.\n\nI did not. If you can give me an hour or so, I'll get the patch\nI previously proposed [1] committed, and then this issue will go away.\n\n\t\t\tregards, tom lane\n\n[1] https://www.postgresql.org/message-id/1573181.1624220108%40sss.pgh.pa.us\n\n\n", "msg_date": "Mon, 28 Jun 2021 10:21:57 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: pgindent run" }, { "msg_contents": "I wrote:\n> Andrew Dunstan <andrew@dunslane.net> writes:\n>> I'll let Tom speak for himself, but I somewhat doubt he meant the code\n>> to stay badly indented for more than a short period of time.\n\n> I did not. If you can give me an hour or so, I'll get the patch\n> I previously proposed [1] committed, and then this issue will go away.\n\nWait ... I did already, at 5a0f1c8c0. Are you sure you were indenting\ncurrent HEAD?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 10:44:42 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: pgindent run" }, { "msg_contents": "\nOn 6/28/21 10:44 AM, Tom Lane wrote:\n> I wrote:\n>> Andrew Dunstan <andrew@dunslane.net> writes:\n>>> I'll let Tom speak for himself, but I somewhat doubt he meant the code\n>>> to stay badly indented for more than a short period of time.\n>> I did not. If you can give me an hour or so, I'll get the patch\n>> I previously proposed [1] committed, and then this issue will go away.\n> Wait ... I did already, at 5a0f1c8c0. Are you sure you were indenting\n> current HEAD?\n>\n> \t\t\t\n\n\n\nNo, see revised patch. 
I posted at 10.13\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 10:50:56 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": true, "msg_subject": "Re: pgindent run" }, { "msg_contents": "Andrew Dunstan <andrew@dunslane.net> writes:\n> On 6/28/21 10:44 AM, Tom Lane wrote:\n>> Wait ... I did already, at 5a0f1c8c0. Are you sure you were indenting\n>> current HEAD?\n\n> No, see revised patch. I posted at 10.13\n\nRight, new version looks better.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 10:56:30 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: pgindent run" } ]
[ { "msg_contents": "I happened to spot the below call in src/bin/pg_basebackup/streamutil.c which\nhas an unbounded %s in the format.\n\n /* fetch xlog value and unit from the result */\n if (sscanf(PQgetvalue(res, 0, 0), \"%d%s\", &xlog_val, xlog_unit) != 2)\n\nThere is no risk of overflow as the unit is defined to be at most 2 characters,\nbut that's not explained (like how a similar %s is handled in pg_dump). The\nattached adds a small explanation in the comment to save readers time from\nfollowing the bouncing ball to make sure.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/", "msg_date": "Mon, 28 Jun 2021 15:06:37 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": true, "msg_subject": "Unbounded %s in sscanf " }, { "msg_contents": "Daniel Gustafsson <daniel@yesql.se> writes:\n> I happened to spot the below call in src/bin/pg_basebackup/streamutil.c which\n> has an unbounded %s in the format.\n\n> /* fetch xlog value and unit from the result */\n> if (sscanf(PQgetvalue(res, 0, 0), \"%d%s\", &xlog_val, xlog_unit) != 2)\n\n> There is no risk of overflow as the unit is defined to be at most 2 characters,\n> but that's not explained (like how a similar %s is handled in pg_dump).\n\nUgh. Shouldn't we instead modify the format to read not more than\ntwo characters? Even if this is safe on non-malicious input, it\ndoesn't seem like good style.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 10:02:46 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Unbounded %s in sscanf" }, { "msg_contents": "> On 28 Jun 2021, at 16:02, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n\n> Ugh. Shouldn't we instead modify the format to read not more than\n> two characters? Even if this is safe on non-malicious input, it\n> doesn't seem like good style.\n\nNo disagreement, I was only basing it on what is in the tree. 
I would propose\nthat we change the sscanf in _LoadBlobs() too though to eliminate all such\ncallsites, even though that one is even safer. I'll prepare a patch once more\ncaffeine has been ingested.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 16:45:53 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": true, "msg_subject": "Re: Unbounded %s in sscanf" }, { "msg_contents": "On 2021-Jun-28, Daniel Gustafsson wrote:\n\n> I happened to spot the below call in src/bin/pg_basebackup/streamutil.c which\n> has an unbounded %s in the format.\n> \n> /* fetch xlog value and unit from the result */\n> if (sscanf(PQgetvalue(res, 0, 0), \"%d%s\", &xlog_val, xlog_unit) != 2)\n> \n> There is no risk of overflow as the unit is defined to be at most 2 characters,\n> but that's not explained (like how a similar %s is handled in pg_dump). The\n> attached adds a small explanation in the comment to save readers time from\n> following the bouncing ball to make sure.\n\nCan you attack the system by crafting malicious output from the query?\nI think the answer is still no, because the output comes from the query\n SHOW wal_segment_size\nwhich, if the attacker can control, the person running pg_basebackup has\nway more serious problems.\n\nBut TBH it seems easy enough to limit to the output variable width.\n\n-- \n�lvaro Herrera 39�49'30\"S 73�17'W\n\"How amazing is that? I call it a night and come back to find that a bug has\nbeen identified and patched while I sleep.\" (Robert Davidson)\n http://archives.postgresql.org/pgsql-sql/2006-03/msg00378.php\n\n\n", "msg_date": "Mon, 28 Jun 2021 12:45:10 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Unbounded %s in sscanf" }, { "msg_contents": "> On 28 Jun 2021, at 16:45, Daniel Gustafsson <daniel@yesql.se> wrote:\n> \n>> On 28 Jun 2021, at 16:02, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> \n>> Ugh. 
Shouldn't we instead modify the format to read not more than\n>> two characters? Even if this is safe on non-malicious input, it\n>> doesn't seem like good style.\n> \n> No disagreement, I was only basing it on what is in the tree. I would propose\n> that we change the sscanf in _LoadBlobs() too though to eliminate all such\n> callsites, even though that one is even safer. I'll prepare a patch once more\n> caffeine has been ingested.\n\nReturning to this, attached is a patchset which amends the two sscanf()\ncallsites with their respective buffersizes for %s format parsing. In pg_dump\nwe need to inject the MAXPGPATH limit with the preprocessor and thus the buffer\nneeds to be increased by one to account for the terminator (which is yet more\nhygiene coding since the fname buffer is now larger than the input buffer).\n\nWhile in here, I noticed that the fname variable is shadowed in the loop\nparsing the blobs TOC which yields a broken error message on parse errors. The\nattached 0003 fixes that.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/", "msg_date": "Sat, 3 Jul 2021 22:18:54 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": true, "msg_subject": "Re: Unbounded %s in sscanf " }, { "msg_contents": "I took another look at this today, and propose to push the attached. The\npg_dump fix goes all the way back to 9.6 whereas the pg_basebackup fix is from\n11 and onwards. The adjacent shadowed variable bug in pg_dump is also present\nsince 9.6.\n\nThoughts?\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/", "msg_date": "Fri, 30 Jul 2021 17:48:49 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": true, "msg_subject": "Re: Unbounded %s in sscanf " }, { "msg_contents": "Daniel Gustafsson <daniel@yesql.se> writes:\n> I took another look at this today, and propose to push the attached. The\n> pg_dump fix goes all the way back to 9.6 whereas the pg_basebackup fix is from\n> 11 and onwards. 
The adjacent shadowed variable bug in pg_dump is also present\n> since 9.6.\n> Thoughts?\n\nGenerally +1, though I wonder if it'd be prudent to deal with the\nshadowed-variable bug by renaming *both* variables. \"fname\" is\nclearly too generic in a function that deals with multiple file\nnames.\n\nAnother thing that is nibbling at the back of my mind is that one\nreason we started to use src/port/snprintf.c all the time is that\nglibc's *printf functions behave in a very unfriendly fashion when\nasked to print text that they think is invalidly encoded, but only\nif the format involves an explicit field width spec. I wonder if\nwe're opening ourselves to similar problems if we start to use\nfield widths with *scanf. In principle, I think the input text\nalways ought to be ASCII in these cases, so that there's no hazard.\nBut is there an interesting security aspect here? That is, if someone\ncan inject a maliciously-crafted file containing non-ASCII data, what\nkind of misbehavior could ensue? It might be that sscanf would just\nreport failure and we'd give up, which would be fine. But if a\nstack overrun could be triggered that way, it'd not be fine.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 30 Jul 2021 12:03:59 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Unbounded %s in sscanf" }, { "msg_contents": "> On 30 Jul 2021, at 18:03, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> \n> Daniel Gustafsson <daniel@yesql.se> writes:\n>> I took another look at this today, and propose to push the attached. The\n>> pg_dump fix goes all the way back to 9.6 whereas the pg_basebackup fix is from\n>> 11 and onwards. The adjacent shadowed variable bug in pg_dump is also present\n>> since 9.6.\n>> Thoughts?\n\nReviving an old thread that had gotten lost in the mists of the INBOX.\n\n> Generally +1, though I wonder if it'd be prudent to deal with the\n> shadowed-variable bug by renaming *both* variables. 
\"fname\" is\n> clearly too generic in a function that deals with multiple file\n> names.\n\nGood point, done in the attached.\n\n> Another thing that is nibbling at the back of my mind is that one\n> reason we started to use src/port/snprintf.c all the time is that\n> glibc's *printf functions behave in a very unfriendly fashion when\n> asked to print text that they think is invalidly encoded, but only\n> if the format involves an explicit field width spec. I wonder if\n> we're opening ourselves to similar problems if we start to use\n> field widths with *scanf. In principle, I think the input text\n> always ought to be ASCII in these cases, so that there's no hazard.\n> But is there an interesting security aspect here? That is, if someone\n> can inject a maliciously-crafted file containing non-ASCII data, what\n> kind of misbehavior could ensue? It might be that sscanf would just\n> report failure and we'd give up, which would be fine. But if a\n> stack overrun could be triggered that way, it'd not be fine.\n\nsscanf won't fail in that case. For multibyte input, %xs will simply stop\nafter x bytes, ignoring torn characters with a (highly likely) incorrect value\nin the result variable. Using %xls (or %xS) would however count x towards the\nnumber of multibytes, which if stored in a normal char* variable could result\nin an overflow.\n\nWith a width specifier this isn't really a vector. 
If an attacker can inject\nmultibyte X which after a torn read results in z being parsed and acted upon,\nshe could also by definition inject z to begin with.\n\nWithout a width specifier, If a malicious actor manages to inject multibyte (or\njust too many bytes), it could however lead to a stack overflow as sscanf will\nkeep reading until a whitespace byte.\n\nI propose to apply the attached all the way down (with the basebackup hunk from\n11), or down to 10 if we want to be conservative with the final 9.6 re ancient\nbugs that haven't seen complaints.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/", "msg_date": "Fri, 15 Oct 2021 13:44:12 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": true, "msg_subject": "Re: Unbounded %s in sscanf" }, { "msg_contents": "Daniel Gustafsson <daniel@yesql.se> writes:\n> I propose to apply the attached all the way down (with the basebackup hunk from\n> 11), or down to 10 if we want to be conservative with the final 9.6 re ancient\n> bugs that haven't seen complaints.\n\nLGTM. No objection to applying this in 9.6.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 15 Oct 2021 10:07:21 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Unbounded %s in sscanf" } ]
[ { "msg_contents": "\nThe README for pg_indent says:\n\n\n    ./configure     # \"make\" will not work in an unconfigured tree\n    cd src/include/catalog\n    make reformat-dat-files\n    cd ../../..\n\n\nThis looks like a case of using a sledgehammer to crack a nut.\n\nI did this, which amounts to the same thing and is faster:\n\n    cd src/include/catalog\n    perl ./reformat_dat_file.pl --output . ./pg_*.dat\n    cd ../../..\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Mon, 28 Jun 2021 15:23:49 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": true, "msg_subject": "pg_indent instructions" }, { "msg_contents": "Andrew Dunstan <andrew@dunslane.net> writes:\n> The README for pg_indent says:\n\n>     ./configure     # \"make\" will not work in an unconfigured tree\n>     cd src/include/catalog\n>     make reformat-dat-files\n>     cd ../../..\n\n> This looks like a case of using a sledgehammer to crack a nut.\n\n> I did this, which amounts to the same thing and is faster:\n\n>     cd src/include/catalog\n>     perl ./reformat_dat_file.pl --output . ./pg_*.dat\n>     cd ../../..\n\nTrue, that saves having to do a configure run, but it also embeds\nsome assumptions that personally I could do without --- mainly\nabout which perl you want to use. In any case, configure is\npretty quick as long as you've got it set up to use a cache file.\n\nOne thing we should do, perhaps, is remove the \"cd\" steps in\nfavor of recommending\n\nmake -C src/include/catalog reformat-dat-files\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 28 Jun 2021 15:29:27 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: pg_indent instructions" } ]
[ { "msg_contents": "Hi,\n\nI want to add the '--ignore-errors' option into the pg_regress module.\n\nI understand it can't be used in the regression or TAP tests. But such \noption is useful to test a custom extension. A custom extension couldn't \npass all check-world tests and will be stopped at the end of first stage.\nMy use case of this feature is to run check-world, look for core files \nand analyze diffs of failed (but ignored) tests. Maybe it is not \ndifficult to apply this patch before the test in CI script, but annoying \nprocedure.\nI think this patch could be applied because it is trivial and can be \neasy reverted if needed.\n\nAn example:\nTEMP_CONFIG=/tmp/extra.config \\\nEXTRA_REGRESS_OPTS=\"--load-extension=aqo --ignore-errors \n--schedule=src/test/regress/parallel_schedule\" \\\nmake check-world\n\nMaybe I just don't know the right way?\n\n-- \nregards,\nAndrey Lepikhov\nPostgres Professional", "msg_date": "Tue, 29 Jun 2021 09:28:33 +0300", "msg_from": "Andrey Lepikhov <a.lepikhov@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Add '--ignore-errors' into pg_regress" }, { "msg_contents": "Andrey Lepikhov <a.lepikhov@postgrespro.ru> writes:\n> I want to add the '--ignore-errors' option into the pg_regress module.\n\n> I understand it can't be used in the regression or TAP tests. But such \n> option is useful to test a custom extension.\n\nI'm really skeptical that this has any positive use. It seems more\nlikely to be a foot-gun.\n\nAlso, pg_regress will already complete all the tests in a particular\nsuite, and I'm not clear on why you wouldn't try to get (say) the core\nsuite passing before trying something else. 
If the core suite has got\nproblems it seems unlikely that you can learn much from other suites.\n\nBTW, I wonder if you can't get much or all of the same effect\nfrom \"make -k check-world\".\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 29 Jun 2021 13:59:38 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Add '--ignore-errors' into pg_regress" }, { "msg_contents": "On 29/6/21 20:59, Tom Lane wrote:\n> Andrey Lepikhov <a.lepikhov@postgrespro.ru> writes:\n> BTW, I wonder if you can't get much or all of the same effect\n> from \"make -k check-world\".\nThank you, 'make -k' is suitable solution in such situation.\n\n-- \nregards,\nAndrey Lepikhov\nPostgres Professional\n\n\n", "msg_date": "Tue, 29 Jun 2021 21:05:11 +0300", "msg_from": "Andrey Lepikhov <a.lepikhov@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Add '--ignore-errors' into pg_regress" } ]
[ { "msg_contents": "Numeric x^y is supported for x < 0 if y is an integer, but this\ncurrently fails if y is outside the range of an int32:\n\nSELECT (-1.0) ^ 2147483647;\n ?column?\n---------------------\n -1.0000000000000000\n(1 row)\n\nSELECT (-1.0) ^ 2147483648;\nERROR: cannot take logarithm of a negative number\n\nbecause only the power_var_int() code path in power_var() handles\nnegative bases correctly. Attached is a patch to fix that.\n\nRegards,\nDean", "msg_date": "Tue, 29 Jun 2021 12:08:01 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Numeric x^y for negative x" }, { "msg_contents": "On Tue, 29 Jun 2021 at 12:08, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n>\n> Numeric x^y is supported for x < 0 if y is an integer, but this\n> currently fails if y is outside the range of an int32\n>\n\nI've been doing some more testing of this, and I spotted another\nproblem with numeric_power().\n\nThis is what happens when raising 0.9999999999 to increasingly large\npowers, which should decrease to zero:\n\n exp 0.9999999999^exp\n10000000000 0.3678794411530483\n100000000000 0.[4 zeros]4539992973978489\n1000000000000 0.[43 zeros]3720075957420456\n10000000000000 0.[434 zeros]5075958643751518\n20000000000000 0.[868 zeros]2576535615307575\n21000000000000 0.[912 zeros]9584908195943232\n22000000000000 0.[955 zeros]3565658653381070\n23000000000000 0.[998 zeros]13\n23100000000000 0.[1000 zeros]\n23200000000000 0.[1000 zeros]\n23300000000000 1.[1000 zeros] *** WRONG ***\n30000000000000 1.[1000 zeros] *** WRONG ***\n40000000000000 1.[1000 zeros] *** WRONG ***\n50000000000000 1.[1000 zeros] *** WRONG ***\n60000000000000 1.[1000 zeros] *** WRONG ***\n70000000000000 ERROR: value overflows numeric format\n\nThe cases where it returns 1 are a trivial logic bug in the\nlocal_rscale calculation in power_var() -- when it computes\nlocal_rscale from rscale and val, it needs to do so before clipping\nrscale to 
NUMERIC_MAX_DISPLAY_SCALE, otherwise it ends up setting\nlocal_rscale = 0, and loses all precision.\n\nI also don't think it should be throwing an overflow error here. Some\ncode paths through numeric_power() catch cases that would underflow,\nand return zero instead, but not all cases are caught. There's a\nsimilar overflow error with numeric_exp() for large negative inputs\n(-5999 returns 0, but -6000 overflows).\n\nIt's arguable though that numeric power() and exp() (and mul() for\nthat matter) should never return 0 for finite non-zero inputs, but\ninstead should throw underflow errors, which would make them\ncompatible with their floating-point counterparts. I don't think\nthat's useful though, and it's more likely to break people's code for\nno real benefit. No other numeric code throws underflow errors.\n\nSo I think we should just attempt to avoid all such overflow errors,\nthat are actually underflows, and return zero instead.\n\nRegards,\nDean\n\n\n", "msg_date": "Thu, 1 Jul 2021 14:17:46 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Thu, 1 Jul 2021 at 14:17, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n>\n> On Tue, 29 Jun 2021 at 12:08, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> >\n> > Numeric x^y is supported for x < 0 if y is an integer, but this\n> > currently fails if y is outside the range of an int32\n>\n> I've been doing some more testing of this, and I spotted another\n> problem with numeric_power().\n>\n> [loss of precision and overflow errors]\n>\n> I think we should attempt to avoid all such overflow errors,\n> that are actually underflows, and return zero instead.\n>\n\nFinally getting back to this ... 
attached is an updated patch that now\nincludes a fix for the loss-of-precision bug and the overflow errors.\nI don't think it's really worth trying to split these up, since\nthey're all somewhat interrelated.\n\nRegards,\nDean", "msg_date": "Wed, 7 Jul 2021 18:36:56 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Wed, Jul 7, 2021 at 10:37 AM Dean Rasheed <dean.a.rasheed@gmail.com>\nwrote:\n\n> On Thu, 1 Jul 2021 at 14:17, Dean Rasheed <dean.a.rasheed@gmail.com>\n> wrote:\n> >\n> > On Tue, 29 Jun 2021 at 12:08, Dean Rasheed <dean.a.rasheed@gmail.com>\n> wrote:\n> > >\n> > > Numeric x^y is supported for x < 0 if y is an integer, but this\n> > > currently fails if y is outside the range of an int32\n> >\n> > I've been doing some more testing of this, and I spotted another\n> > problem with numeric_power().\n> >\n> > [loss of precision and overflow errors]\n> >\n> > I think we should attempt to avoid all such overflow errors,\n> > that are actually underflows, and return zero instead.\n> >\n>\n> Finally getting back to this ... 
attached is an updated patch that now\n> includes a fix for the loss-of-precision bug and the overflow errors.\n> I don't think it's really worth trying to split these up, since\n> they're all somewhat interrelated.\n>\n> Regards,\n> Dean\n>\nHi,\n\n+ (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),\n+ errmsg(\"value overflows numeric format\")));\n\nHere is an example of existing error message which I think is more readable\nthan 'overflows numeric format':\n\n errmsg(\"bigint out of range\")));\n\nMaybe rephrase as: value is out of range\n\nCheers\n\nOn Wed, Jul 7, 2021 at 10:37 AM Dean Rasheed <dean.a.rasheed@gmail.com> wrote:On Thu, 1 Jul 2021 at 14:17, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n>\n> On Tue, 29 Jun 2021 at 12:08, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> >\n> > Numeric x^y is supported for x < 0 if y is an integer, but this\n> > currently fails if y is outside the range of an int32\n>\n> I've been doing some more testing of this, and I spotted another\n> problem with numeric_power().\n>\n> [loss of precision and overflow errors]\n>\n> I think we should attempt to avoid all such overflow errors,\n> that are actually underflows, and return zero instead.\n>\n\nFinally getting back to this ... 
attached is an updated patch that now\nincludes a fix for the loss-of-precision bug and the overflow errors.\nI don't think it's really worth trying to split these up, since\nthey're all somewhat interrelated.\n\nRegards,\nDeanHi,+                   (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),+                    errmsg(\"value overflows numeric format\")));Here is an example of existing error message which I think is more readable than 'overflows numeric format':                 errmsg(\"bigint out of range\"))); Maybe rephrase as: value is out of rangeCheers", "msg_date": "Wed, 7 Jul 2021 11:02:31 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Wed, 7 Jul 2021 at 18:57, Zhihong Yu <zyu@yugabyte.com> wrote:\n>\n> + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),\n> + errmsg(\"value overflows numeric format\")));\n>\n> Here is an example of existing error message which I think is more readable than 'overflows numeric format':\n>\n> errmsg(\"bigint out of range\")));\n>\n> Maybe rephrase as: value is out of range\n>\n\nHmm, I don't know. 
That's the error that has been thrown by lots of\nnumeric functions for a long time now, and it seems fine to me.\n\nRegards,\nDean\n\n\n", "msg_date": "Wed, 7 Jul 2021 19:42:43 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Wed, 7 Jul 2021 18:36:56 +0100\nDean Rasheed <dean.a.rasheed@gmail.com> wrote:\n\n> On Thu, 1 Jul 2021 at 14:17, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> >\n> > On Tue, 29 Jun 2021 at 12:08, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> > >\n> > > Numeric x^y is supported for x < 0 if y is an integer, but this\n> > > currently fails if y is outside the range of an int32\n> >\n> > I've been doing some more testing of this, and I spotted another\n> > problem with numeric_power().\n> >\n> > [loss of precision and overflow errors]\n> >\n> > I think we should attempt to avoid all such overflow errors,\n> > that are actually underflows, and return zero instead.\n> >\n> \n> Finally getting back to this ... 
attached is an updated patch that now\n> includes a fix for the loss-of-precision bug and the overflow errors.\n> I don't think it's really worth trying to split these up, since\n> they're all somewhat interrelated.\n\nThe patch can be applied cleanly.\n(Though, I need to remove lines \"new file mode 100644\" else I get an error\n \"error: git apply: bad git-diff - expected /dev/null on line 4\".)\n\nCompilation succeeded, and all tests passed.\n\nThis patch fixes numeric_power() to handle negative bases correctly and not\nto raise an error \"cannot take logarithm of a negative number\", as well as a\nbug that a result whose values is almost zero is incorrectly returend as 1.\nThe previous behaviors are obvious strange, and these fixes seem to me reasonable.\n\nAlso, improper overflow errors are corrected in numeric_power() and\nnumeric_exp() to return 0 when it is underflow in fact.\nI think it is no problem that these functions return zero instead of underflow\nerrors because power_var_int() already do the same.\n\nThe patch includes additional tests for checking negative bases cases and\nunderflow and rounding of the almost zero results. It seems good.\n\nLet me just make one comment.\n\n (errcode(ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION),\n errmsg(\"zero raised to a negative power is undefined\")));\n\n- if (sign1 < 0 && !numeric_is_integral(num2))\n- ereport(ERROR,\n- (errcode(ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION),\n- errmsg(\"a negative number raised to a non-integer power yields a complex result\")));\n-\n /*\n * Initialize things\n */\n\nI don't think we need to move this check from numeric_power to power_var.\nI noticed the following comment in a numeric_power(). \n\n /* \n * The SQL spec requires that we emit a particular SQLSTATE error code for\n * certain error conditions. 
Specifically, we don't return a\n * divide-by-zero error code for 0 ^ -1.\n */\n\nIn the original code, two checks that could raise an error of\nERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION are following the comment.\nI think these check codes are placed together under this comment intentionally,\nso I suggest not to move one of them.\n\n\nRegards,\nYugo Nagata\n\n-- \nYugo NAGATA <nagata@sraoss.co.jp>\n\n\n", "msg_date": "Tue, 20 Jul 2021 18:15:09 +0900", "msg_from": "Yugo NAGATA <nagata@sraoss.co.jp>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Tue, 20 Jul 2021 at 10:17, Yugo NAGATA <nagata@sraoss.co.jp> wrote:\n>\n> This patch fixes numeric_power() to handle negative bases correctly and not\n> to raise an error \"cannot take logarithm of a negative number\", as well as a\n> bug that a result whose values is almost zero is incorrectly returend as 1.\n> The previous behaviors are obvious strange, and these fixes seem to me reasonable.\n>\n> Also, improper overflow errors are corrected in numeric_power() and\n> numeric_exp() to return 0 when it is underflow in fact.\n> I think it is no problem that these functions return zero instead of underflow\n> errors because power_var_int() already do the same.\n>\n> The patch includes additional tests for checking negative bases cases and\n> underflow and rounding of the almost zero results. 
It seems good.\n\nThanks for the review!\n\n\n> Let me just make one comment.\n>\n> (errcode(ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION),\n> errmsg(\"zero raised to a negative power is undefined\")));\n>\n> - if (sign1 < 0 && !numeric_is_integral(num2))\n> - ereport(ERROR,\n> - (errcode(ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION),\n> - errmsg(\"a negative number raised to a non-integer power yields a complex result\")));\n> -\n> /*\n> * Initialize things\n> */\n>\n> I don't think we need to move this check from numeric_power to power_var.\n\nMoving it to power_var() means that it only needs to be checked in the\ncase of a negative base, together with an exponent that cannot be\nhandled by power_var_int(), which saves unnecessary checking. It isn't\nnecessary to do this test at all if the exponent is an integer small\nenough to fit in a 32-bit int. And if it's not an integer, or it's a\nlarger integer than that, it seems more logical to do the test in\npower_var() near to the other code handling that case.\n\n\n> I noticed the following comment in a numeric_power().\n>\n> /*\n> * The SQL spec requires that we emit a particular SQLSTATE error code for\n> * certain error conditions. Specifically, we don't return a\n> * divide-by-zero error code for 0 ^ -1.\n> */\n>\n> In the original code, two checks that could raise an error of\n> ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION are following the comment.\n> I think these check codes are placed together under this comment intentionally,\n> so I suggest not to move one of them.\n\nAh, that's a good point about the SQL spec. 
The comment only refers to\nthe case of 0 ^ -1, but the SQL spec does indeed say that a negative\nnumber to a non-integer power should return the same error code.\n\nHere is an updated patch with additional comments about the required\nerror code when raising a negative number to a non-integer power, and\nwhere it is checked.\n\nRegards,\nDean", "msg_date": "Wed, 21 Jul 2021 11:10:16 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Wed, 21 Jul 2021 11:10:16 +0100\nDean Rasheed <dean.a.rasheed@gmail.com> wrote:\n\n> On Tue, 20 Jul 2021 at 10:17, Yugo NAGATA <nagata@sraoss.co.jp> wrote:\n> >\n> > This patch fixes numeric_power() to handle negative bases correctly and not\n> > to raise an error \"cannot take logarithm of a negative number\", as well as a\n> > bug that a result whose values is almost zero is incorrectly returend as 1.\n> > The previous behaviors are obvious strange, and these fixes seem to me reasonable.\n> >\n> > Also, improper overflow errors are corrected in numeric_power() and\n> > numeric_exp() to return 0 when it is underflow in fact.\n> > I think it is no problem that these functions return zero instead of underflow\n> > errors because power_var_int() already do the same.\n> >\n> > The patch includes additional tests for checking negative bases cases and\n> > underflow and rounding of the almost zero results. 
It seems good.\n> \n> Thanks for the review!\n> \n> \n> > Let me just make one comment.\n> >\n> > (errcode(ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION),\n> > errmsg(\"zero raised to a negative power is undefined\")));\n> >\n> > - if (sign1 < 0 && !numeric_is_integral(num2))\n> > - ereport(ERROR,\n> > - (errcode(ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION),\n> > - errmsg(\"a negative number raised to a non-integer power yields a complex result\")));\n> > -\n> > /*\n> > * Initialize things\n> > */\n> >\n> > I don't think we need to move this check from numeric_power to power_var.\n> \n> Moving it to power_var() means that it only needs to be checked in the\n> case of a negative base, together with an exponent that cannot be\n> handled by power_var_int(), which saves unnecessary checking. It isn't\n> necessary to do this test at all if the exponent is an integer small\n> enough to fit in a 32-bit int. And if it's not an integer, or it's a\n> larger integer than that, it seems more logical to do the test in\n> power_var() near to the other code handling that case.\n\nIndeed, I agree with that this change saves unnecessary checking.\n\n> \n> > I noticed the following comment in a numeric_power().\n> >\n> > /*\n> > * The SQL spec requires that we emit a particular SQLSTATE error code for\n> > * certain error conditions. Specifically, we don't return a\n> > * divide-by-zero error code for 0 ^ -1.\n> > */\n> >\n> > In the original code, two checks that could raise an error of\n> > ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION are following the comment.\n> > I think these check codes are placed together under this comment intentionally,\n> > so I suggest not to move one of them.\n> \n> Ah, that's a good point about the SQL spec. 
The comment only refers to\n> the case of 0 ^ -1, but the SQL spec does indeed say that a negative\n> number to a non-integer power should return the same error code.\n> \n> Here is an updated patch with additional comments about the required\n> error code when raising a negative number to a non-integer power, and\n> where it is checked.\n\nThank you for updating the patch. I am fine with the additional comments.\nI don't think there is any other problem left, so I marked it Ready-for-Committers.\n\nRegards,\nYugo Nagata\n\n-- \nYugo NAGATA <nagata@sraoss.co.jp>\n\n\n", "msg_date": "Thu, 22 Jul 2021 14:11:36 +0900", "msg_from": "Yugo NAGATA <nagata@sraoss.co.jp>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Thu, 22 Jul 2021 at 06:13, Yugo NAGATA <nagata@sraoss.co.jp> wrote:\n>\n> Thank you for updating the patch. I am fine with the additional comments.\n> I don't think there is any other problem left, so I marked it Ready-for-Committers.\n>\n\nThanks for looking. Barring any further comments, I'll push this in a few days.\n\nRegards,\nDean\n\n\n", "msg_date": "Thu, 22 Jul 2021 16:19:35 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Thu, 22 Jul 2021 at 16:19, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n>\n> On Thu, 22 Jul 2021 at 06:13, Yugo NAGATA <nagata@sraoss.co.jp> wrote:\n> >\n> > Thank you for updating the patch. I am fine with the additional comments.\n> > I don't think there is any other problem left, so I marked it Ready-for-Committers.\n>\n> Thanks for looking. Barring any further comments, I'll push this in a few days.\n>\n\nSo I have been testing this a lot over the last couple of days, and I\nhave concluded that the patch works well as far as it goes, but I did\nmanage to construct another case where numeric_power() loses\nprecision. 
I think, though, that it would be better to tackle that as\na separate patch.\n\nIn writing the commit message for this patch, I realised that it was\npossible to tidy up the local_rscale calculation part of it a little,\nto make it more obvious what was going wrong, so attached is a\nslightly tweaked version. I'll hold off on committing it for a few\nmore days in case anyone else wants to have a look. Tom?\n\nThe other issue I found is related to the first part of power_var(),\nwhere it does a low-precision calculation to get an estimate for the\nweight of the result. It occurred to me that for certain input bases,\nthat calculation could be made to be quite inaccurate, and therefore\nlead to choosing the wrong rscale later. This is the test case I\nconstructed:\n\n (1-1.5123456789012345678e-1000) ^ 1.15e1003\n\nHere, the base is a sliver under 1, and so ln(base) is approximately\n-1.5e-1000, and ln_dweight is -1000 (the decimal weight of ln(base)).\nThe problem is that the local_rscale used for the first low-precision\ncalculation is limited to NUMERIC_MAX_DISPLAY_SCALE (which is 1000),\nso we only compute ln_base to a scale of 1000 at that stage, and the\nresult is rounded to exactly 2e-1000, which is off by a factor of\naround 1.333333.\n\nThat makes it think the result weight will be -998, when actually it's\n-755, so it then chooses a local_rscale for the full calculation\nthat's far too small, and the result is very inaccurate.\n\nTo fix this, I think it's necessary to remove the line that limits the\ninitial local_rscale. I tried that in a debugger, and managed to get a\nresult that agreed with the result from \"bc -l\" with a scale of 2000.\n\nThe reason I think it will be OK to remove that line is that it only\never comes into play when ln_dweight is a large negative number (and\nthe smallest it can be is -16383). But that can only happen in\ninstances like this, where the base is very very close to 1. 
In such\ncases, the ln(base) calculation is very fast, because it basically\nonly has to do a couple of Taylor series terms, and it's done. This\nwill still only be a low-precision estimate of the result (about 8\nsignificant digits, shifted down a long way).\n\nIt might also be necessary to re-think the choice of local_rscale for\nthe mul_var() that follows. If the weight of exp is much larger than\nthe weight of ln_base (or vice versa), it doesn't really make sense to\ncompute the product to the same local_rscale. That might be a source\nof other inaccuracies. I'll try to investigate some more.\n\nAnyway, I don't think any of that should get in the way of committing\nthe current patch.\n\nRegards,\nDean", "msg_date": "Thu, 29 Jul 2021 19:14:05 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "Dean Rasheed <dean.a.rasheed@gmail.com> writes:\n> On Thu, 22 Jul 2021 at 16:19, Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n>> Thanks for looking. Barring any further comments, I'll push this in a few days.\n\n> So I have been testing this a lot over the last couple of days, and I\n> have concluded that the patch works well as far as it goes, but I did\n> manage to construct another case where numeric_power() loses\n> precision. 
I think, though, that it would be better to tackle that as\n> a separate patch.\n\nIt looks like castoroides is not happy with this patch:\n\nhttps://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=castoroides&dt=2021-08-01%2008%3A52%3A43\n\nMaybe there's some hidden C99 dependency in what you did?\nAlthough pademelon, which is one of our other pre-C99\ndinosaurs, doesn't seem to be unhappy.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 05 Aug 2021 12:04:39 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Thu, 5 Aug 2021 at 17:04, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> It looks like castoroides is not happy with this patch:\n>\n> https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=castoroides&dt=2021-08-01%2008%3A52%3A43\n>\n> Maybe there's some hidden C99 dependency in what you did?\n> Although pademelon, which is one of our other pre-C99\n> dinosaurs, doesn't seem to be unhappy.\n>\n\nHmm, there's something very weird going on there. The 0.9999999999 ^\n70000000000000 test, for example, is one that would have thrown an\noverflow error before, but it's not doing that.\n\nSo somehow, when it hits the overflow/underflow test, Abs(val) is not\ngreater than NUMERIC_MAX_RESULT_SCALE * 3.01, which is 6020. The thing\nis, when I step through it, I get val = -7000, which should trigger\nthat comfortably. Even if I play with the return value from\nestimate_ln_dweight(), which relies on some double precision\narithmetic, making it -11 or -9 instead of -10, I still get val =\n-7000. And even if I force val to be -6000, or even val = 0, so that\nit doesn't trigger the overflow/underflow test, it still returns zero\nin the end. The end result in this case just isn't very sensitive to\nchanges in these values.\n\nSo I'm wondering if it's somehow not even getting that far. 
Maybe if\nthe earlier test to see if exp can be represented as an integer is\nfailing, it might be going through power_var_int() instead, which\nwould explain it returning a non-zero value. That hypothesis would be\neasy to test, by changing the test to 0.9999999999 ^ 70000000000000.5.\n\nIn any case, it would be interesting to see what castoroides returns for\n\nselect 0.9999999999 ^ 70000000000000;\n\nand\n\nselect 0.9999999999 ^ 70000000000000.5;\n\nRegards,\nDean\n\n\n", "msg_date": "Thu, 5 Aug 2021 19:27:16 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "Dean Rasheed <dean.a.rasheed@gmail.com> writes:\n> On Thu, 5 Aug 2021 at 17:04, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> It looks like castoroides is not happy with this patch:\n>> https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=castoroides&dt=2021-08-01%2008%3A52%3A43\n\n> Hmm, there's something very weird going on there.\n\nYeah. I tried to reproduce this on the gcc compile farm's Solaris 10\nmachine, but the test passed fine for me. The only obvious configuration\ndifference I can find is that that machine has\n\n$ cc -V\ncc: Sun C 5.10 SunOS_sparc Patch 141861-10 2012/11/07\n\nwhereas castorides' compiler seems to be a few years older. 
So this\ndoes seem like it could be a compiler bug.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 05 Aug 2021 22:58:04 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Fri, 6 Aug 2021 at 03:58, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> Dean Rasheed <dean.a.rasheed@gmail.com> writes:\n> > On Thu, 5 Aug 2021 at 17:04, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> It looks like castoroides is not happy with this patch:\n> >> https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=castoroides&dt=2021-08-01%2008%3A52%3A43\n>\n> > Hmm, there's something very weird going on there.\n>\n> Yeah. I tried to reproduce this on the gcc compile farm's Solaris 10\n> machine, but the test passed fine for me. The only obvious configuration\n> difference I can find is that that machine has\n>\n> $ cc -V\n> cc: Sun C 5.10 SunOS_sparc Patch 141861-10 2012/11/07\n>\n> whereas castorides' compiler seems to be a few years older. So this\n> does seem like it could be a compiler bug.\n>\n\nAh, so the latest test results from castoroides confirm my previous\nhypothesis, that it isn't even reaching the new code in power_var():\n\n 0.9999999999 ^ 23300000000000 returned 1.0199545627709647\n 0.9999999999 ^ 70000000000000 returned 0.9396000441558118\n\nwhich are actually the results you'd get if you just cast the exponent\nto an int32, throwing away the top 32 bits and compute the results:\n\n 0.9999999999 ^ -197580800 = 1.0199545627709647\n 0.9999999999 ^ 623009792 = 0.9396000441558118\n\nSo the \"test for overflow by reverse-conversion\" obviously isn't\nworking as intended, and it's going through power_var_int() when it\nshouldn't. I don't think there's anything wrong with that code, so I\nthink this is a compiler bug.\n\nI guess the best thing to do is just test the value against\nPG_INT32_MIN/MAX, which is what int84() does. 
There are 2 other places\nin numeric.c that use similar code to check for int16/32 overflow, so\nit's possible that they're broken in the same way on that platform,\nbut they aren't covered by the regression tests, so it's also possible\nthat they're OK. Anyway, something like the attached seems likely to\nbe safer.\n\nRegards,\nDean", "msg_date": "Fri, 6 Aug 2021 17:06:20 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "Dean Rasheed <dean.a.rasheed@gmail.com> writes:\n> So the \"test for overflow by reverse-conversion\" obviously isn't\n> working as intended, and it's going through power_var_int() when it\n> shouldn't. I don't think there's anything wrong with that code, so I\n> think this is a compiler bug.\n\nYeah, looks like one.\n\n> I guess the best thing to do is just test the value against\n> PG_INT32_MIN/MAX, which is what int84() does. There are 2 other places\n> in numeric.c that use similar code to check for int16/32 overflow, so\n> it's possible that they're broken in the same way on that platform,\n> but they aren't covered by the regression tests, so it's also possible\n> that they're OK. Anyway, something like the attached seems likely to\n> be safer.\n\nLooks plausible by eyeball (I've not tested).\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 06 Aug 2021 12:15:18 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Fri, 6 Aug 2021 at 17:15, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> > I guess the best thing to do is just test the value against\n> > PG_INT32_MIN/MAX, which is what int84() does. 
There are 2 other places\n> > in numeric.c that use similar code to check for int16/32 overflow, so\n> > it's possible that they're broken in the same way on that platform,\n> > but they aren't covered by the regression tests, so it's also possible\n> > that they're OK. Anyway, something like the attached seems likely to\n> > be safer.\n>\n> Looks plausible by eyeball (I've not tested).\n>\n\nSo, I have back-branch patches for this ready to go. The question is,\nis it better to push now, or wait until after next week's releases?\n\nRegards,\nDean\n\n\n", "msg_date": "Fri, 6 Aug 2021 21:23:39 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "Dean Rasheed <dean.a.rasheed@gmail.com> writes:\n> On Fri, 6 Aug 2021 at 17:15, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> Looks plausible by eyeball (I've not tested).\n\n> So, I have back-branch patches for this ready to go. The question is,\n> is it better to push now, or wait until after next week's releases?\n\nI'd push now, given we have a failing buildfarm member.\n\nAdmittedly, there may be nobody else using that compiler out in\nthe real world, but we don't know that.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 06 Aug 2021 16:26:06 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Fri, 6 Aug 2021 at 21:26, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> Dean Rasheed <dean.a.rasheed@gmail.com> writes:\n> > On Fri, 6 Aug 2021 at 17:15, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> Looks plausible by eyeball (I've not tested).\n>\n> > So, I have back-branch patches for this ready to go. 
The question is,\n> > is it better to push now, or wait until after next week's releases?\n>\n> I'd push now, given we have a failing buildfarm member.\n>\n> Admittedly, there may be nobody else using that compiler out in\n> the real world, but we don't know that.\n>\n\nOK. Will do.\n\nRegards,\nDean\n\n\n", "msg_date": "Fri, 6 Aug 2021 21:27:03 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Fri, Aug 06, 2021 at 09:27:03PM +0100, Dean Rasheed wrote:\n> On Fri, 6 Aug 2021 at 21:26, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >\n> > Dean Rasheed <dean.a.rasheed@gmail.com> writes:\n> > > On Fri, 6 Aug 2021 at 17:15, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > >> Looks plausible by eyeball (I've not tested).\n> >\n> > > So, I have back-branch patches for this ready to go. The question is,\n> > > is it better to push now, or wait until after next week's releases?\n> >\n> > I'd push now, given we have a failing buildfarm member.\n> >\n> > Admittedly, there may be nobody else using that compiler out in\n> > the real world, but we don't know that.\n> >\n> \n> OK. Will do.\n> \n\nHi Dean,\n\nIt seems you already committed this. But it's still as \"Ready for\ncommitter\" in the commitfest app. \n\nAre we waiting for something else or we can mark it as committed?\n\n-- \nJaime Casanova\nDirector de Servicios Profesionales\nSystemGuards - Consultores de PostgreSQL\n\n\n", "msg_date": "Wed, 1 Sep 2021 18:39:17 -0500", "msg_from": "Jaime Casanova <jcasanov@systemguards.com.ec>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Thu, 2 Sept 2021 at 00:39, Jaime Casanova\n<jcasanov@systemguards.com.ec> wrote:\n>\n> Hi Dean,\n>\n> It seems you already committed this. 
But it's still as \"Ready for\n> committer\" in the commitfest app.\n>\n> Are we waiting for something else or we can mark it as committed?\n>\n\nIt's mostly done, but there is one more corner case where it loses\nprecision. I'll post an update shortly.\n\nRegards,\nDean\n\n\n", "msg_date": "Thu, 2 Sep 2021 07:27:09 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Thu, Sep 02, 2021 at 07:27:09AM +0100, Dean Rasheed wrote:\n> On Thu, 2 Sept 2021 at 00:39, Jaime Casanova\n> <jcasanov@systemguards.com.ec> wrote:\n> >\n> > Hi Dean,\n> >\n> > It seems you already committed this. But it's still as \"Ready for\n> > committer\" in the commitfest app.\n> >\n> > Are we waiting for something else or we can mark it as committed?\n> >\n> \n> It's mostly done, but there is one more corner case where it loses\n> precision. I'll post an update shortly.\n> \n\nGreat! I'm marking this as \"waiting on author\".\n\n-- \nJaime Casanova\nDirector de Servicios Profesionales\nSystemGuards - Consultores de PostgreSQL\n\n\n", "msg_date": "Fri, 10 Sep 2021 12:31:20 -0500", "msg_from": "Jaime Casanova <jcasanov@systemguards.com.ec>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Thu, Sep 02, 2021 at 07:27:09AM +0100, Dean Rasheed wrote:\n>\n> It's mostly done, but there is one more corner case where it loses\n> precision. I'll post an update shortly.\n>\n\nI spent some more time looking at this, testing a variety of edge\ncases, and the only case I could find that produces inaccurate results\nwas the one I noted previously -- computing x^y when x is very close\nto 1 (less than around 1e-1000 away from it, so that ln_dweight is\nless than around -1000). 
In this case, it loses precision due to the\nway local_rscale is set for the initial low-precision calculation:\n\n local_rscale = 8 - ln_dweight;\n local_rscale = Max(local_rscale, NUMERIC_MIN_DISPLAY_SCALE);\n local_rscale = Min(local_rscale, NUMERIC_MAX_DISPLAY_SCALE);\n\nThis needs to be allowed to be greater than NUMERIC_MAX_DISPLAY_SCALE\n(1000), otherwise the approximate result will lose all precision,\nleading to a poor choice of scale for the full-precision calculation.\n\nSo the fix is just to remove the upper bound on this local_rscale, as\nwe do for the full-precision calculation. This doesn't impact\nperformance, because it's only computing the logarithm to 8\nsignificant digits at this stage, and when x is very close to 1 like\nthis, ln_var() has very little work to do -- there is no argument\nreduction to do, and the Taylor series terminates on the second term,\nsince 1-x is so small.\n\nComing up with a test case that doesn't have thousands of digits is a\nbit fiddly, so I chose one where most of the significant digits of the\nresult are a long way after the decimal point and shifted them up,\nwhich makes the loss of precision in HEAD more obvious. The expected\nresult can be verified using bc with a scale of 2000.\n\nRegards,\nDean", "msg_date": "Sun, 12 Sep 2021 20:36:05 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On 2021-Sep-12, Dean Rasheed wrote:\n\n> So the fix is just to remove the upper bound on this local_rscale, as\n> we do for the full-precision calculation. 
This doesn't impact\n> performance, because it's only computing the logarithm to 8\n> significant digits at this stage, and when x is very close to 1 like\n> this, ln_var() has very little work to do -- there is no argument\n> reduction to do, and the Taylor series terminates on the second term,\n> since 1-x is so small.\n\nI came here just to opine that there should be a comment about there not\nbeing a clamp to the maximum scale. For example, log_var says \"Set the\nscales .. so that they each have more digits ...\" which seems clear\nenough; I think the new comment is a bit on the short side.\n\n> Coming up with a test case that doesn't have thousands of digits is a\n> bit fiddly, so I chose one where most of the significant digits of the\n> result are a long way after the decimal point and shifted them up,\n> which makes the loss of precision in HEAD more obvious. The expected\n> result can be verified using bc with a scale of 2000.\n\nI couldn't get bc (version 1.07.1) to output the result; it says\n\nRuntime warning (func=(main), adr=47): non-zero scale in exponent\nRuntime error (func=(main), adr=47): exponent too large in raise\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Mon, 13 Sep 2021 13:51:17 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Mon, 13 Sept 2021 at 17:51, Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> I came here just to opine that there should be a comment about there not\n> being a clamp to the maximum scale. For example, log_var says \"Set the\n> scales .. so that they each have more digits ...\" which seems clear\n> enough; I think the new comment is a bit on the short side.\n>\n\nOK, that's a fair point. 
Updated version attached.\n\n> I couldn't get bc (version 1.07.1) to output the result; it says\n>\n> Runtime warning (func=(main), adr=47): non-zero scale in exponent\n> Runtime error (func=(main), adr=47): exponent too large in raise\n>\n\nAh yes, bc's \"^\" operator is a bit limited. It doesn't support\nfractional powers for example, and evidently doesn't like powers that\nlarge. I'm so used to not using it that I didn't notice - I always\njust use exp() and ln() in bc to compute powers:\n\nscale=2000\ne(l(1 - 1.500012345678*10^-1000) * 1.45*10^1003) * 10^1000\n\nRegards,\nDean", "msg_date": "Mon, 13 Sep 2021 19:29:13 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Mon, Sep 13, 2021 at 07:29:13PM +0100, Dean Rasheed wrote:\n> On Mon, 13 Sept 2021 at 17:51, Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> >\n> > I came here just to opine that there should be a comment about there not\n> > being a clamp to the maximum scale. For example, log_var says \"Set the\n> > scales .. so that they each have more digits ...\" which seems clear\n> > enough; I think the new comment is a bit on the short side.\n> >\n> \n> OK, that's a fair point. 
Updated version attached.\n> \n\nHi Dean,\n\nAre you planning to commit this soon?\n\n-- \nJaime Casanova\nDirector de Servicios Profesionales\nSystemGuards - Consultores de PostgreSQL\n\n\n", "msg_date": "Thu, 30 Sep 2021 12:25:07 -0500", "msg_from": "Jaime Casanova <jcasanov@systemguards.com.ec>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Thu, 30 Sept 2021 at 18:25, Jaime Casanova\n<jcasanov@systemguards.com.ec> wrote:\n>\n> Are you planning to commit this soon?\n>\n\nYes, I'll take a look at it next week.\n\nI think it's worth backpatching, despite the fact that it's a pretty\nobscure corner case that probably isn't affecting anyone -- similar\nfixes in this area have been backpatched, and keeping the code in the\nback branches in sync will help with future maintenance and testing,\nif any other bugs are found.\n\nRegards,\nDean\n\n\n", "msg_date": "Fri, 1 Oct 2021 07:56:33 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric x^y for negative x" }, { "msg_contents": "On Fri, Oct 01, 2021 at 07:56:33AM +0100, Dean Rasheed wrote:\n> On Thu, 30 Sept 2021 at 18:25, Jaime Casanova\n> <jcasanov@systemguards.com.ec> wrote:\n> >\n> > Are you planning to commit this soon?\n> >\n> \n> Yes, I'll take a look at it next week.\n> \n\nHi Dean,\n\nGreat! I'll move the CF entry to the Next Commitfest so we can move to\nclosable state.\n\n-- \nJaime Casanova\nDirector de Servicios Profesionales\nSystemGuards - Consultores de PostgreSQL\n\n\n", "msg_date": "Fri, 1 Oct 2021 09:25:10 -0500", "msg_from": "Jaime Casanova <jcasanov@systemguards.com.ec>", "msg_from_op": false, "msg_subject": "Re: Numeric x^y for negative x" } ]
[ { "msg_contents": "Hi all,\n\nI realized that we use the magic number 10 instead of\nPG_STAT_GET_REPLICATION_SLOT_COLS in pg_stat_get_replication_slot()\nfunction. It seems an oversight of the original commit. Attached patch\nfixes it.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/", "msg_date": "Tue, 29 Jun 2021 20:41:34 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Use PG_STAT_GET_REPLICATION_SLOT_COLS in\n pg_stat_get_replication_slot()" }, { "msg_contents": "On Tue, Jun 29, 2021 at 5:12 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> Hi all,\n>\n> I realized that we use the magic number 10 instead of\n> PG_STAT_GET_REPLICATION_SLOT_COLS in pg_stat_get_replication_slot()\n> function. It seems an oversight of the original commit. Attached patch\n> fixes it.\n>\n\nLGTM. I'll take care of it tomorrow.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 29 Jun 2021 18:07:30 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Use PG_STAT_GET_REPLICATION_SLOT_COLS in\n pg_stat_get_replication_slot()" }, { "msg_contents": "On Tue, Jun 29, 2021 at 6:07 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Tue, Jun 29, 2021 at 5:12 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > Hi all,\n> >\n> > I realized that we use the magic number 10 instead of\n> > PG_STAT_GET_REPLICATION_SLOT_COLS in pg_stat_get_replication_slot()\n> > function. It seems an oversight of the original commit. Attached patch\n> > fixes it.\n> >\n>\n> LGTM. 
I'll take care of it tomorrow.\n>\n\nPushed.\n\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 30 Jun 2021 13:42:37 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Use PG_STAT_GET_REPLICATION_SLOT_COLS in\n pg_stat_get_replication_slot()" }, { "msg_contents": "On Wed, Jun 30, 2021 at 5:12 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Tue, Jun 29, 2021 at 6:07 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > On Tue, Jun 29, 2021 at 5:12 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > Hi all,\n> > >\n> > > I realized that we use the magic number 10 instead of\n> > > PG_STAT_GET_REPLICATION_SLOT_COLS in pg_stat_get_replication_slot()\n> > > function. It seems an oversight of the original commit. Attached patch\n> > > fixes it.\n> > >\n> >\n> > LGTM. I'll take care of it tomorrow.\n> >\n>\n> Pushed.\n\nThanks!\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Wed, 30 Jun 2021 20:48:14 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Use PG_STAT_GET_REPLICATION_SLOT_COLS in\n pg_stat_get_replication_slot()" } ]
[ { "msg_contents": "Hi,\n\nNot per Coverity.\n\nhash_choose_num_partitions function has issues.\nThere are at least two path calls made with used_bits = 0.\nSee at hashagg_spill_init.\n\nTo confirm, I run this code on cpp.sh:\nint main()\n{\n int npartitions = 0;\n int used_bits = 0;\n int partition_bits = 0;\n int i;\n\n for(i = 0; i <= 32; i++) {\n /* make sure that we don't exhaust the hash bits */\n if (partition_bits + used_bits >= 32)\n partition_bits = 32 - used_bits;\n npartitions = 1L << partition_bits;\n printf(\"used_bits=%d\\n\", used_bits);\n printf(\"partition_bits=%d\\n\", partition_bits);\n printf(\"npartitions=%d\\n\\n\", npartitions);\n partition_bits++;\n }\n }\n\nWhose output would be:\nused_bits=0\npartition_bits=0\nnpartitions=1\n\nused_bits=0\npartition_bits=1\nnpartitions=2\n\nused_bits=0\npartition_bits=2\nnpartitions=4\n\nused_bits=0\npartition_bits=3\nnpartitions=8\n\nused_bits=0\npartition_bits=4\nnpartitions=16\n\nused_bits=0\npartition_bits=5\nnpartitions=32\n\nused_bits=0\npartition_bits=6\nnpartitions=64\n\nused_bits=0\npartition_bits=7\nnpartitions=128\n\nused_bits=0\npartition_bits=8\nnpartitions=256\n\nused_bits=0\npartition_bits=9\nnpartitions=512\n\nused_bits=0\npartition_bits=10\nnpartitions=1024\n\nused_bits=0\npartition_bits=11\nnpartitions=2048\n\nused_bits=0\npartition_bits=12\nnpartitions=4096\n\nused_bits=0\npartition_bits=13\nnpartitions=8192\n\nused_bits=0\npartition_bits=14\nnpartitions=16384\n\nused_bits=0\npartition_bits=15\nnpartitions=32768\n\nused_bits=0\npartition_bits=16\nnpartitions=65536\n\nused_bits=0\npartition_bits=17\nnpartitions=131072\n\nused_bits=0\npartition_bits=18\nnpartitions=262144\n\nused_bits=0\npartition_bits=19\nnpartitions=524288\n\nused_bits=0\npartition_bits=20\nnpartitions=1048576\n\nused_bits=0\npartition_bits=21\nnpartitions=2097152\n\nused_bits=0\npartition_bits=22\nnpartitions=4194304\n\nused_bits=0\npartition_bits=23\nnpartitions=8388608\n\nused_bits=0\npartition_bits=24\nnpartitions=1
6777216\n\nused_bits=0\npartition_bits=25\nnpartitions=33554432\n\nused_bits=0\npartition_bits=26\nnpartitions=67108864\n\nused_bits=0\npartition_bits=27\nnpartitions=134217728\n\nused_bits=0\npartition_bits=28\nnpartitions=268435456\n\nused_bits=0\npartition_bits=29\nnpartitions=536870912\n\nused_bits=0\npartition_bits=30\nnpartitions=1073741824\n\nused_bits=0\npartition_bits=31\nnpartitions=-2147483648\n\nused_bits=0\npartition_bits=32\nnpartitions=0\n\nWith partition_bits > 24, is very problematic, but with 31 and 32, it\nbecomes a bug.\nWith npartitions = -2147483648 and 0, the function hashagg_spill_init,\nwill generate an operation that is undefined according to the rules of C.\nspill->mask = (npartitions - 1) << spill->shift;\n\nOn Windows 64 bits (HEAD) fails with partition_prune:\nparallel group (11 tests): reloptions hash_part partition_info explain\ncompression resultcache indexing partition_join partition_aggregate\npartition_prune tuplesort\n partition_join ... ok 3495 ms\n partition_prune ... 
FAILED 4926 ms\n\ndiff -w -U3\nC:/dll/postgres/postgres_head/src/test/regress/expected/partition_prune.out\nC:/dll/postgres/postgres_head/src/test/regress/results/partition_prune.out\n---\nC:/dll/postgres/postgres_head/src/test/regress/expected/partition_prune.out\n2021-06-23 11:11:26.489575100 -0300\n+++\nC:/dll/postgres/postgres_head/src/test/regress/results/partition_prune.out\n2021-06-29 10:54:43.103775700 -0300\n@@ -2660,7 +2660,7 @@\n --------------------------------------------------------------------------\n Nested Loop (actual rows=3 loops=1)\n -> Seq Scan on tbl1 (actual rows=5 loops=1)\n- -> Append (actual rows=1 loops=5)\n+ -> Append (actual rows=0 loops=5)\n -> Index Scan using tprt1_idx on tprt_1 (never executed)\n Index Cond: (col1 = tbl1.col1)\n -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2)\n\nWith patch attached:\nparallel group (11 tests): partition_info hash_part resultcache reloptions\nexplain compression indexing partition_aggregate partition_join tuplesort\npartition_prune\n partition_join ... ok 3013 ms\n partition_prune ... ok 3959 ms\n\nregards,\nRanier Vilela", "msg_date": "Tue, 29 Jun 2021 11:32:44 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Avoid choose invalid number of partitions\n (src/backend/executor/nodeAgg.c)" }, { "msg_contents": "On Wed, 30 Jun 2021 at 02:33, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> hash_choose_num_partitions function has issues.\n> There are at least two path calls made with used_bits = 0.\n> See at hashagg_spill_init.\n\n> On Windows 64 bits (HEAD) fails with partition_prune:\n> parallel group (11 tests): reloptions hash_part partition_info explain compression resultcache indexing partition_join partition_aggregate partition_prune tuplesort\n> partition_join ... ok 3495 ms\n> partition_prune ... 
FAILED 4926 ms\n>\n> diff -w -U3 C:/dll/postgres/postgres_head/src/test/regress/expected/partition_prune.out C:/dll/postgres/postgres_head/src/test/regress/results/partition_prune.out\n> --- C:/dll/postgres/postgres_head/src/test/regress/expected/partition_prune.out 2021-06-23 11:11:26.489575100 -0300\n> +++ C:/dll/postgres/postgres_head/src/test/regress/results/partition_prune.out 2021-06-29 10:54:43.103775700 -0300\n> @@ -2660,7 +2660,7 @@\n> --------------------------------------------------------------------------\n> Nested Loop (actual rows=3 loops=1)\n> -> Seq Scan on tbl1 (actual rows=5 loops=1)\n> - -> Append (actual rows=1 loops=5)\n> + -> Append (actual rows=0 loops=5)\n> -> Index Scan using tprt1_idx on tprt_1 (never executed)\n> Index Cond: (col1 = tbl1.col1)\n> -> Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2)\n>\n> With patch attached:\n> parallel group (11 tests): partition_info hash_part resultcache reloptions explain compression indexing partition_aggregate partition_join tuplesort partition_prune\n> partition_join ... ok 3013 ms\n> partition_prune ... ok 3959 ms\n\nThis failure was reported to me along with this thread so I had a look at it.\n\nFirstly, I'm a bit confused as to why you think making a change in\nnodeAgg.c would have any effect on a plan that does not contain any\naggregate node.\n\nAs for the regression test failure. 
I can recreate it, but I did have\nto install VS2019 version 16.9.3 from\nhttps://docs.microsoft.com/en-us/visualstudio/releases/2019/history\n\nThis basically boils down to the 16.9.3 compiler outputting \"0\" for:\n\n#include <stdio.h>\n\nint main(void)\n{\n printf(\"%.0f\\n\", 0.59999999999999998);\n return 0;\n}\n\nbut we expect it to output \"1\".\n\nWe name use of the provided sprintf() function in snprintf.c line 1188 with:\n\nvallen = sprintf(convert, fmt, prec, value);\n\nI don't see the problem in more recent versions of VS2019, but I\ndidn't go to the trouble of figuring out exactly which version this\nwas fixed in.\n\nDavid\n\n\n", "msg_date": "Mon, 30 Aug 2021 22:44:13 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Avoid choose invalid number of partitions\n (src/backend/executor/nodeAgg.c)" }, { "msg_contents": "Em seg., 30 de ago. de 2021 às 07:44, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Wed, 30 Jun 2021 at 02:33, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> > hash_choose_num_partitions function has issues.\n> > There are at least two path calls made with used_bits = 0.\n> > See at hashagg_spill_init.\n>\n> > On Windows 64 bits (HEAD) fails with partition_prune:\n> > parallel group (11 tests): reloptions hash_part partition_info explain\n> compression resultcache indexing partition_join partition_aggregate\n> partition_prune tuplesort\n> > partition_join ... ok 3495 ms\n> > partition_prune ... 
FAILED 4926 ms\n> >\n> > diff -w -U3\n> C:/dll/postgres/postgres_head/src/test/regress/expected/partition_prune.out\n> C:/dll/postgres/postgres_head/src/test/regress/results/partition_prune.out\n> > ---\n> C:/dll/postgres/postgres_head/src/test/regress/expected/partition_prune.out\n> 2021-06-23 11:11:26.489575100 -0300\n> > +++\n> C:/dll/postgres/postgres_head/src/test/regress/results/partition_prune.out\n> 2021-06-29 10:54:43.103775700 -0300\n> > @@ -2660,7 +2660,7 @@\n> >\n> --------------------------------------------------------------------------\n> > Nested Loop (actual rows=3 loops=1)\n> > -> Seq Scan on tbl1 (actual rows=5 loops=1)\n> > - -> Append (actual rows=1 loops=5)\n> > + -> Append (actual rows=0 loops=5)\n> > -> Index Scan using tprt1_idx on tprt_1 (never executed)\n> > Index Cond: (col1 = tbl1.col1)\n> > -> Index Scan using tprt2_idx on tprt_2 (actual rows=1\n> loops=2)\n> >\n> > With patch attached:\n> > parallel group (11 tests): partition_info hash_part resultcache\n> reloptions explain compression indexing partition_aggregate partition_join\n> tuplesort partition_prune\n> > partition_join ... ok 3013 ms\n> > partition_prune ... ok 3959 ms\n>\n> This failure was reported to me along with this thread so I had a look at\n> it.\n>\nThanks.\n\n\n> Firstly, I'm a bit confused as to why you think making a change in\n> nodeAgg.c would have any effect on a plan that does not contain any\n> aggregate node.\n>\nYeah, they are unrelated.\nFor some reason, when checking the regress, partion_prune was ok and I\nmistakenly made a connection with the changes, which is wrong.\n\n\n> As for the regression test failure. 
I can recreate it, but I did have\n> to install VS2019 version 16.9.3 from\n> https://docs.microsoft.com/en-us/visualstudio/releases/2019/history\n>\n> This basically boils down to the 16.9.3 compiler outputting \"0\" for:\n>\n> #include <stdio.h>\n>\n> int main(void)\n> {\n> printf(\"%.0f\\n\", 0.59999999999999998);\n> return 0;\n> }\n>\n> but we expect it to output \"1\".\n>\n> We name use of the provided sprintf() function in snprintf.c line 1188\n> with:\n>\n> vallen = sprintf(convert, fmt, prec, value);\n>\n> I don't see the problem in more recent versions of VS2019, but I\n> didn't go to the trouble of figuring out exactly which version this\n> was fixed in.\n>\nRegarding this test, with the last msvc, compiled for Debug, it still\noccurs:\n partition_join ... ok 4267 ms\n partition_prune ... FAILED 5270 ms\n reloptions ... ok 755 ms\n hash_part ... ok 494 ms\n\nI still believe it's the compiler problem.\n\nregards,\nRanier Vilela\n\nEm seg., 30 de ago. de 2021 às 07:44, David Rowley <dgrowleyml@gmail.com> escreveu:On Wed, 30 Jun 2021 at 02:33, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> hash_choose_num_partitions function has issues.\n> There are at least two path calls made with used_bits = 0.\n> See at hashagg_spill_init.\n\n> On Windows 64 bits (HEAD) fails with partition_prune:\n> parallel group (11 tests):  reloptions hash_part partition_info explain compression resultcache indexing partition_join partition_aggregate partition_prune tuplesort\n>      partition_join               ... ok         3495 ms\n>      partition_prune              ... 
FAILED     4926 ms\n>\n> diff -w -U3 C:/dll/postgres/postgres_head/src/test/regress/expected/partition_prune.out C:/dll/postgres/postgres_head/src/test/regress/results/partition_prune.out\n> --- C:/dll/postgres/postgres_head/src/test/regress/expected/partition_prune.out 2021-06-23 11:11:26.489575100 -0300\n> +++ C:/dll/postgres/postgres_head/src/test/regress/results/partition_prune.out 2021-06-29 10:54:43.103775700 -0300\n> @@ -2660,7 +2660,7 @@\n>  --------------------------------------------------------------------------\n>   Nested Loop (actual rows=3 loops=1)\n>     ->  Seq Scan on tbl1 (actual rows=5 loops=1)\n> -   ->  Append (actual rows=1 loops=5)\n> +   ->  Append (actual rows=0 loops=5)\n>           ->  Index Scan using tprt1_idx on tprt_1 (never executed)\n>                 Index Cond: (col1 = tbl1.col1)\n>           ->  Index Scan using tprt2_idx on tprt_2 (actual rows=1 loops=2)\n>\n> With patch attached:\n> parallel group (11 tests):  partition_info hash_part resultcache reloptions explain compression indexing partition_aggregate partition_join tuplesort partition_prune\n>      partition_join               ... ok         3013 ms\n>      partition_prune              ... ok         3959 ms\n\nThis failure was reported to me along with this thread so I had a look at it.Thanks. \n\nFirstly, I'm a bit confused as to why you think making a change in\nnodeAgg.c would have any effect on a plan that does not contain any\naggregate node.Yeah, they are unrelated. For some reason, when checking the regress, partion_prune was ok and I mistakenly made a connection with the changes, which is wrong.\n\nAs for the regression test failure. 
I can recreate it, but I did have\nto install VS2019 version 16.9.3 from\nhttps://docs.microsoft.com/en-us/visualstudio/releases/2019/history\n\nThis basically boils down to the 16.9.3 compiler outputting \"0\" for:\n\n#include <stdio.h>\n\nint main(void)\n{\n    printf(\"%.0f\\n\", 0.59999999999999998);\n    return 0;\n}\n\nbut we expect it to output \"1\".\n\nWe name use of the provided sprintf() function in snprintf.c line 1188 with:\n\nvallen = sprintf(convert, fmt, prec, value);\n\nI don't see the problem in more recent versions of VS2019, but I\ndidn't go to the trouble of figuring out exactly which version this\nwas fixed in.Regarding this test, with the last msvc, compiled for Debug, it still occurs:     partition_join               ... ok         4267 ms     partition_prune              ... FAILED     5270 ms     reloptions                   ... ok          755 ms     hash_part                    ... ok          494 msI still believe it's the compiler problem.regards,Ranier Vilela", "msg_date": "Mon, 30 Aug 2021 20:18:45 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Avoid choose invalid number of partitions\n (src/backend/executor/nodeAgg.c)" } ]
[ { "msg_contents": "Hi,\n\nThe program pg_receivewal can use gzip compression to store the received WAL.\nThis patch teaches it to be able to use lz4 compression if the binary is build\nusing the -llz4 flag.\n\nPreviously, the user had to use the option --compress with a value between [0-9]\nto denote that gzip compression was requested. This specific behaviour is\nmaintained. A newly introduced option --compress-program=lz4 can be used to ask\nfor the logs to be compressed using lz4 instead. In that case, no compression\nvalues can be selected as it does not seem too useful.\n\nUnder the hood there is nothing exceptional to be noted. Tar based archives have\nnot yet been taught to use lz4 compression. Those are used by pg_basebackup. If\nis is felt useful, then it is easy to be added in a new patch.\n\nCheers,\n//Georgios", "msg_date": "Tue, 29 Jun 2021 14:45:17 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Tue, Jun 29, 2021 at 02:45:17PM +0000, gkokolatos@pm.me wrote:\n> The program pg_receivewal can use gzip compression to store the received WAL.\n> This patch teaches it to be able to use lz4 compression if the binary is build\n> using the -llz4 flag.\n\nNice.\n\n> Previously, the user had to use the option --compress with a value between [0-9]\n> to denote that gzip compression was requested. This specific behaviour is\n> maintained. A newly introduced option --compress-program=lz4 can be used to ask\n> for the logs to be compressed using lz4 instead. In that case, no compression\n> values can be selected as it does not seem too useful.\n\nYes, I am not convinced either that we should care about making the\nacceleration customizable.\n\n> Under the hood there is nothing exceptional to be noted. Tar based archives have\n> not yet been taught to use lz4 compression. Those are used by pg_basebackup. 
If\n> is is felt useful, then it is easy to be added in a new patch.\n\nDocumentation is missing from the patch.\n\n+ LZ4F_compressionContext_t ctx;\n+ size_t outbufCapacity;\n+ void *outbuf;\nIt may be cleaner to refer to lz4 in the name of those variables?\n\n+ ctx_out = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION);\n+ outbufCapacity = LZ4F_compressBound(LZ4_IN_SIZE, NULL /* default preferences */);\nInteresting. So this cannot be done at compilation time because of\nthe auto-flush mode looking at the LZ4 code. That looks about right.\n\ngetopt_long() is forgotting the new option 'I'.\n\n+ system_or_bail('lz4', '-t', $lz4_wals[0]);\nI think that you should just drop this part of the test. The only\npart of LZ4 that we require to be present when Postgres is built with\n--with-lz4 is its library liblz4. Commands associated to it may not\nbe around, causing this test to fail. The test checking that one .lz4\nfile has been created is good to have. It may be worth adding a test\nwith a .lz4.partial segment generated and --endpos pointing to a LSN\nthat does not finish the segment that gets switched.\n\nIt seems to me that you are missing some logic in\nFindStreamingStart() to handle LZ4-compressed segments, in relation\nwith IsCompressXLogFileName() and IsPartialCompressXLogFileName().\n\n+ pg_log_error(\"invalid compress-program \\\"%s\\\"\", optarg);\n\"compress-program\" sounds weird. Shouldn't that just say \"invalid\ncompression method\" or similar?\n\n+ printf(_(\" -Z, --compress=0-9 compress logs with given\ncompression level (available only with compress-program=zlib)\\n\"));\nThis line is too long.\n\nShould we have more tests for ZLIB, while on it? 
That seems like a\ngood addition as long as we can skip the tests conditionally when\nthat's not supported.\n--\nMichael", "msg_date": "Wed, 30 Jun 2021 12:38:03 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Tue, Jun 29, 2021 at 8:15 PM <gkokolatos@pm.me> wrote:\n>\n> Hi,\n>\n> The program pg_receivewal can use gzip compression to store the received WAL.\n> This patch teaches it to be able to use lz4 compression if the binary is build\n> using the -llz4 flag.\n\n+1 for the idea\n\nSome comments/suggestions on the patch\n\n1.\n@@ -90,7 +91,8 @@ usage(void)\n printf(_(\" --synchronous flush write-ahead log immediately\nafter writing\\n\"));\n printf(_(\" -v, --verbose output verbose messages\\n\"));\n printf(_(\" -V, --version output version information, then exit\\n\"));\n- printf(_(\" -Z, --compress=0-9 compress logs with given\ncompression level\\n\"));\n+ printf(_(\" -I, --compress-program use this program for compression\\n\"));\n\nWouldn't it be better to call it compression method instead of\ncompression program?\n\n2.\n+ printf(_(\" -Z, --compress=0-9 compress logs with given\ncompression level (available only with compress-program=zlib)\\n\"));\n\nI think we can somehow use \"acceleration\" parameter of lz4 compression\nto map on compression level, It is not direct mapping but\ncan't we create some internal mapping instead of completely ignoring\nthis option for lz4, or we can provide another option for lz4?\n\n3. 
Should we also support LZ4 compression using dictionary?\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 30 Jun 2021 12:03:55 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Wed, Jun 30, 2021 at 8:34 AM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n>\n> On Tue, Jun 29, 2021 at 8:15 PM <gkokolatos@pm.me> wrote:\n> >\n> > Hi,\n> >\n> > The program pg_receivewal can use gzip compression to store the received WAL.\n> > This patch teaches it to be able to use lz4 compression if the binary is build\n> > using the -llz4 flag.\n>\n> +1 for the idea\n>\n> Some comments/suggestions on the patch\n>\n> 1.\n> @@ -90,7 +91,8 @@ usage(void)\n> printf(_(\" --synchronous flush write-ahead log immediately\n> after writing\\n\"));\n> printf(_(\" -v, --verbose output verbose messages\\n\"));\n> printf(_(\" -V, --version output version information, then exit\\n\"));\n> - printf(_(\" -Z, --compress=0-9 compress logs with given\n> compression level\\n\"));\n> + printf(_(\" -I, --compress-program use this program for compression\\n\"));\n>\n> Wouldn't it be better to call it compression method instead of\n> compression program?\n\nI came here to say exactly that, just had to think up what I thought\nwas the better name first. 
Either method or algorithm, but method\nseems like the much simpler choice and therefore better in this case.\n\nShould is also then not be --compression-method, rather than --compress-method?\n\n--\n Magnus Hagander\n Me: https://www.hagander.net/\n Work: https://www.redpill-linpro.com/\n\n\n", "msg_date": "Thu, 1 Jul 2021 12:28:08 +0200", "msg_from": "Magnus Hagander <magnus@hagander.net>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Thursday, July 1st, 2021 at 12:28, Magnus Hagander <magnus@hagander.net> wrote:\n\n> On Wed, Jun 30, 2021 at 8:34 AM Dilip Kumar dilipbalaut@gmail.com wrote:\n>\n> > On Tue, Jun 29, 2021 at 8:15 PM gkokolatos@pm.me wrote:\n> >\n> > > Hi,\n> > >\n> > > The program pg_receivewal can use gzip compression to store the received WAL.\n> > >\n> > > This patch teaches it to be able to use lz4 compression if the binary is build\n> > >\n> > > using the -llz4 flag.\n> >\n> > +1 for the idea\n> >\n> > Some comments/suggestions on the patch\n> >\n> > @@ -90,7 +91,8 @@ usage(void)\n> >\n> > printf((\" --synchronous flush write-ahead log immediately\n> >\n> > after writing\\n\"));\n> >\n> > printf((\" -v, --verbose output verbose messages\\n\"));\n> >\n> > printf(_(\" -V, --version output version information, then exit\\n\"));\n> >\n> > - printf(_(\" -Z, --compress=0-9 compress logs with given\n> >\n> > compression level\\n\"));\n> >\n> > - printf(_(\" -I, --compress-program use this program for compression\\n\"));\n> >\n> > Wouldn't it be better to call it compression method instead of\n> >\n> > compression program?\n>\n> I came here to say exactly that, just had to think up what I thought\n>\n> was the better name first. Either method or algorithm, but method\n>\n> seems like the much simpler choice and therefore better in this case.\n>\n> Should is also then not be --compression-method, rather than --compress-method?\n\nNot a problem. 
To be very transparent, I first looked what was already out there.\nFor example `tar` is using\n -I, --use-compress-program=PROG\nyet the 'use-' bit would push the alignment of the --help output, so I removed it.\n\nTo me, as a non native English speaker, `--compression-method` does sound better.\nI can just re-align the rest of the help output.\n\nUpdated patch is on the making.\n\n>\n> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n>\n> Magnus Hagander\n>\n> Me: https://www.hagander.net/\n>\n> Work: https://www.redpill-linpro.com/\n\n\n", "msg_date": "Thu, 01 Jul 2021 13:39:29 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Jul 1, 2021 at 3:39 PM <gkokolatos@pm.me> wrote:\n>\n>\n>\n> ‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n>\n> On Thursday, July 1st, 2021 at 12:28, Magnus Hagander <magnus@hagander.net> wrote:\n>\n> > On Wed, Jun 30, 2021 at 8:34 AM Dilip Kumar dilipbalaut@gmail.com wrote:\n> >\n> > > On Tue, Jun 29, 2021 at 8:15 PM gkokolatos@pm.me wrote:\n> > >\n> > > > Hi,\n> > > >\n> > > > The program pg_receivewal can use gzip compression to store the received WAL.\n> > > >\n> > > > This patch teaches it to be able to use lz4 compression if the binary is build\n> > > >\n> > > > using the -llz4 flag.\n> > >\n> > > +1 for the idea\n> > >\n> > > Some comments/suggestions on the patch\n> > >\n> > > @@ -90,7 +91,8 @@ usage(void)\n> > >\n> > > printf((\" --synchronous flush write-ahead log immediately\n> > >\n> > > after writing\\n\"));\n> > >\n> > > printf((\" -v, --verbose output verbose messages\\n\"));\n> > >\n> > > printf(_(\" -V, --version output version information, then exit\\n\"));\n> > >\n> > > - printf(_(\" -Z, 
--compress=0-9 compress logs with given\n> > >\n> > > compression level\\n\"));\n> > >\n> > > - printf(_(\" -I, --compress-program use this program for compression\\n\"));\n> > >\n> > > Wouldn't it be better to call it compression method instead of\n> > >\n> > > compression program?\n> >\n> > I came here to say exactly that, just had to think up what I thought\n> >\n> > was the better name first. Either method or algorithm, but method\n> >\n> > seems like the much simpler choice and therefore better in this case.\n> >\n> > Should is also then not be --compression-method, rather than --compress-method?\n>\n> Not a problem. To be very transparent, I first looked what was already out there.\n> For example `tar` is using\n> -I, --use-compress-program=PROG\n> yet the 'use-' bit would push the alignment of the --help output, so I removed it.\n\nI think the difference there is that tar actually calls an external\nprogram to do the work... And we are using the built-in library,\nright?\n\n--\n Magnus Hagander\n Me: https://www.hagander.net/\n Work: https://www.redpill-linpro.com/\n\n\n", "msg_date": "Thu, 1 Jul 2021 15:58:24 +0200", "msg_from": "Magnus Hagander <magnus@hagander.net>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Thursday, July 1st, 2021 at 15:58, Magnus Hagander <magnus@hagander.net> wrote:\n\n> On Thu, Jul 1, 2021 at 3:39 PM gkokolatos@pm.me wrote:\n>\n> > ‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n> >\n> > On Thursday, July 1st, 2021 at 12:28, Magnus Hagander magnus@hagander.net wrote:\n> >\n> > > On Wed, Jun 30, 2021 at 8:34 AM Dilip Kumar dilipbalaut@gmail.com wrote:\n> > >\n> > > > On Tue, Jun 29, 2021 at 8:15 PM gkokolatos@pm.me wrote:\n> > > >\n> > > > > Hi,\n> > > > >\n> > > > > The program pg_receivewal can use gzip compression to store the received WAL.\n> > > > >\n> > > > > This patch teaches it to be able to use lz4 compression if the binary is 
build\n> > > > >\n> > > > > using the -llz4 flag.\n> > > >\n> > > > +1 for the idea\n> > > >\n> > > > Some comments/suggestions on the patch\n> > > >\n> > > > @@ -90,7 +91,8 @@ usage(void)\n> > > >\n> > > > printf((\" --synchronous flush write-ahead log immediately\n> > > >\n> > > > after writing\\n\"));\n> > > >\n> > > > printf((\" -v, --verbose output verbose messages\\n\"));\n> > > >\n> > > > printf(_(\" -V, --version output version information, then exit\\n\"));\n> > > >\n> > > > - printf(_(\" -Z, --compress=0-9 compress logs with given\n> > > >\n> > > > compression level\\n\"));\n> > > >\n> > > > - printf(_(\" -I, --compress-program use this program for compression\\n\"));\n> > > >\n> > > >\n> > > > Wouldn't it be better to call it compression method instead of\n> > > >\n> > > > compression program?\n> > >\n> > > I came here to say exactly that, just had to think up what I thought\n> > >\n> > > was the better name first. Either method or algorithm, but method\n> > >\n> > > seems like the much simpler choice and therefore better in this case.\n> > >\n> > > Should is also then not be --compression-method, rather than --compress-method?\n> >\n> > Not a problem. To be very transparent, I first looked what was already out there.\n> >\n> > For example `tar` is using\n> >\n> > -I, --use-compress-program=PROG\n> >\n> > yet the 'use-' bit would push the alignment of the --help output, so I removed it.\n>\n> I think the difference there is that tar actually calls an external\n>\n> program to do the work... And we are using the built-in library,\n>\n> right?\n\nYou are very correct :) I am not objecting the change at all. Just let you know\nhow I chose that. You know, naming is dead easy and all...\n\nOn a more serious note, what about the `-I` short flag? Should we keep it or\nis there a better one to be used?\n\nMicheal suggested on the same thread to move my entry in the help output so that\nthe output remains ordered. 
I would like the options for the compression method and\nthe already existing compression level to next to each other if possible. Then it\nshould be either 'X' or 'Y'.\n\nThoughts?\n\n\n\n>\n> ------------------------------------------------------------------------------------------------------------------------------------------------\n>\n> Magnus Hagander\n>\n> Me: https://www.hagander.net/\n>\n> Work: https://www.redpill-linpro.com/\n\n\n", "msg_date": "Thu, 01 Jul 2021 14:10:17 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Jul 01, 2021 at 02:10:17PM +0000, gkokolatos@pm.me wrote:\n> Micheal suggested on the same thread to move my entry in the help output so that\n> the output remains ordered. I would like the options for the compression method and\n> the already existing compression level to next to each other if possible. Then it\n> should be either 'X' or 'Y'.\n\nHmm. Grouping these together makes sense for the user. One choice\nthat we have here is to drop the short option, and just use a long\none. What I think is important for the user when it comes to this\noption is the consistency of its naming across all the tools\nsupporting it. pg_dump and pg_basebackup, where we could plug LZ4,\nalready use most of the short options you could use for pg_receivewal,\nhaving only a long one gives a bit more flexibility. \n--\nMichael", "msg_date": "Fri, 2 Jul 2021 10:10:16 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Friday, July 2nd, 2021 at 03:10, Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Thu, Jul 01, 2021 at 02:10:17PM +0000, gkokolatos@pm.me wrote:\n>\n> > Micheal suggested on the same thread to move my entry in the help output so that\n> >\n> > the output remains ordered. 
I would like the options for the compression method and\n> >\n> > the already existing compression level to next to each other if possible. Then it\n> >\n> > should be either 'X' or 'Y'.\n>\n> Hmm. Grouping these together makes sense for the user. One choice\n>\n> that we have here is to drop the short option, and just use a long\n>\n> one. What I think is important for the user when it comes to this\n>\n> option is the consistency of its naming across all the tools\n>\n> supporting it. pg_dump and pg_basebackup, where we could plug LZ4,\n>\n> already use most of the short options you could use for pg_receivewal,\n>\n> having only a long one gives a bit more flexibility.\n\n\nGood point. I am going with that one.\n\n\n> --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n>\n> Michael\n\n\n", "msg_date": "Fri, 02 Jul 2021 07:35:00 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "Hi,\n\nplease find v2 of the patch which tries to address the commends received so far.\n\nThank you all for your comments.\n\nMichael Paquier wrote:\n\n> Documentation is missing from the patch.\n>\nIt has now been added.\n\n> + LZ4F_compressionContext_t ctx;\n> + size_t outbufCapacity;\n> + void *outbuf;\n> It may be cleaner to refer to lz4 in the name of those variables?\n\nAgreed and done\n\n> + ctx_out = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION);\n> + outbufCapacity = LZ4F_compressBound(LZ4_IN_SIZE, NULL /* default preferences */);\n> Interesting. 
So this cannot be done at compilation time because of\n> the auto-flush mode looking at the LZ4 code. That looks about right.\n\nThis is also my understanding.\n\n> + system_or_bail('lz4', '-t', $lz4_wals[0]);\n> I think that you should just drop this part of the test. The only\n> part of LZ4 that we require to be present when Postgres is built with\n> --with-lz4 is its library liblz4. Commands associated to it may not\n> be around, causing this test to fail. The test checking that one .lz4\n> file has been created is good to have. It may be worth adding a test\n> with a .lz4.partial segment generated and --endpos pointing to a LSN\n> that does not finish the segment that gets switched.\n\nI humbly disagree with the need for the test. It is rather easily possible\nto generate a file that can not be decoded, thus becoming useless. Having the\ntest will provide some guarantee for the fact. In the current patch, there\nis code to find out if the program lz4 is available in the system. If it is\nnot available, then that specific test is skipped. The rest remains as it\nwere. Also `system_or_bail` is not used anymore in favour of the `system_log`\nso that the test counted and the execution of tests continues upon failure.\n\n\n> It seems to me that you are missing some logic in\n> FindStreamingStart() to handle LZ4-compressed segments, in relation\n> with IsCompressXLogFileName() and IsPartialCompressXLogFileName().\n\nVery correct. The logic is now added. Given the lz4 api, one has to fill\nin the uncompressed size during creation time. Then one can read the\nheaders and verify the expectations.\n\n\n> Should we have more tests for ZLIB, while on it? That seems like a\n> good addition as long as we can skip the tests conditionally when\n> that's not supported.\n\nAgreed. Please allow me to provide a distinct patch for this code.\n\n\nDilip Kumar wrote:\n\n> Wouldn't it be better to call it compression method instead of\n> compression program?\n\nAgreed. 
This is inline with the suggestions of other reviewers.\nFind the change in the attached patch.\n\n> I think we can somehow use \"acceleration\" parameter of lz4 compression\n> to map on compression level, It is not direct mapping but\n> can't we create some internal mapping instead of completely ignoring\n> this option for lz4, or we can provide another option for lz4?\n\nWe can, though I am not in favour of doing so. There is seemingly\nlittle benefit for added complexity.\n\n> Should we also support LZ4 compression using dictionary?\n\nI would we should not do that. If my understanding is correct,\ndecompression would require the dictionary to be passed along.\nThe algorithm seems to be very competitive to the current compression\nas is.\n\nMagnus Hagander wrote:\n\n> I came here to say exactly that, just had to think up what I thought\n> was the better name first. Either method or algorithm, but method\n> seems like the much simpler choice and therefore better in this case.\n>\n> Should is also then not be --compression-method, rather than --compress-method?\n\nAgreed and changed throughout.\n\n\nMichael Paquier wrote:\n\n> What I think is important for the user when it comes to this\n> option is the consistency of its naming across all the tools\n> supporting it. pg_dump and pg_basebackup, where we could plug LZ4,\n> already use most of the short options you could use for pg_receivewal,\n> having only a long one gives a bit more flexibility.\n\nDone.\n\nCheers,\n//Georgios", "msg_date": "Thu, 08 Jul 2021 14:18:40 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Jul 08, 2021 at 02:18:40PM +0000, gkokolatos@pm.me wrote:\n> please find v2 of the patch which tries to address the commends\n> received so far.\n\nThanks!\n\n> Michael Paquier wrote:\n>> + system_or_bail('lz4', '-t', $lz4_wals[0]);\n>> I think that you should just drop this part of the test. 
The only\n>> part of LZ4 that we require to be present when Postgres is built with\n>> --with-lz4 is its library liblz4. Commands associated to it may not\n>> be around, causing this test to fail. The test checking that one .lz4\n>> file has been created is good to have. It may be worth adding a test\n>> with a .lz4.partial segment generated and --endpos pointing to a LSN\n>> that does not finish the segment that gets switched.\n> \n> I humbly disagree with the need for the test. It is rather easily possible\n> to generate a file that can not be decoded, thus becoming useless. Having the\n> test will provide some guarantee for the fact. In the current patch, there\n> is code to find out if the program lz4 is available in the system. If it is\n> not available, then that specific test is skipped. The rest remains as it\n> were. Also `system_or_bail` is not used anymore in favour of the `system_log`\n> so that the test counted and the execution of tests continues upon failure.\n\nCheck. I can see what you are using in the new patch. We could live\nwith that.\n\n>> It seems to me that you are missing some logic in\n>> FindStreamingStart() to handle LZ4-compressed segments, in relation\n>> with IsCompressXLogFileName() and IsPartialCompressXLogFileName().\n> \n> Very correct. The logic is now added. Given the lz4 api, one has to fill\n> in the uncompressed size during creation time. Then one can read the\n> headers and verify the expectations.\n\nYeah, I read that as well with lz4 --list and the kind. That's weird\ncompared to how ZLIB gives an easy access to it. We may want to do an\neffort and tell about more lz4 --content-size/--list, telling that we\nadd the size in the segment compressed because we have to and LZ4 does\nnot do it by default?\n\n>> Should we have more tests for ZLIB, while on it? That seems like a\n>> good addition as long as we can skip the tests conditionally when\n>> that's not supported.\n> \n> Agreed. 
Please allow me to provide a distinct patch for this code.\n\nThanks. Looking forward to seeing it. That may be better on a\nseparate thread, even if I digressed in this thread :)\n\n>> I think we can somehow use \"acceleration\" parameter of lz4 compression\n>> to map on compression level, It is not direct mapping but\n>> can't we create some internal mapping instead of completely ignoring\n>> this option for lz4, or we can provide another option for lz4?\n> \n> We can, though I am not in favour of doing so. There is seemingly\n> little benefit for added complexity.\n\nAgreed.\n\n>> What I think is important for the user when it comes to this\n>> option is the consistency of its naming across all the tools\n>> supporting it. pg_dump and pg_basebackup, where we could plug LZ4,\n>> already use most of the short options you could use for pg_receivewal,\n>> having only a long one gives a bit more flexibility.\n> \n> Done.\n\n * http://www.zlib.org/rfc-gzip.html.\n+ * For lz4 compressed segments\n */\nThis comment is incomplete.\t\t \n\n+#define IsLZ4CompressXLogFileName(fname) \\\n+ (strlen(fname) == XLOG_FNAME_LEN + strlen(\".lz4\") && \\\n+ strspn(fname, \"0123456789ABCDEF\") == XLOG_FNAME_LEN && \\\n+ strcmp((fname) + XLOG_FNAME_LEN, \".lz4\") == 0)\n+#define IsLZ4PartialCompressXLogFileName(fname) \\\n+ (strlen(fname) == XLOG_FNAME_LEN + strlen(\".lz4.partial\") && \\\n+ strspn(fname, \"0123456789ABCDEF\") == XLOG_FNAME_LEN && \\\n+ strcmp((fname) + XLOG_FNAME_LEN, \".lz4.partial\") == 0)\nThis is getting complicated. Would it be better to change this stuff\nand switch to a routine that checks if a segment has a valid name, is\npartial, and the type of compression that applied to it? 
It seems to\nme that we should group iszlibcompress and islz4compress together with\nthe options available through compression_method.\n\n+ if (compresslevel != 0)\n+ {\n+ if (compression_method == COMPRESSION_NONE)\n+ {\n+ compression_method = COMPRESSION_ZLIB;\n+ }\n+ if (compression_method != COMPRESSION_ZLIB)\n+ {\n+ pg_log_error(\"cannot use --compress when \"\n+ \"--compression-method is not gzip\");\n+ fprintf(stderr, _(\"Try \\\"%s --help\\\" for more information.\\n\"),\n+ progname);\n+ exit(1);\n+ }\n+ }\nFor compatibility where --compress enforces the use of zlib that would\nwork, but this needs a comment explaining the goal of this block. I\nam wondering if it would be better to break the will and just complain\nwhen specifying --compress without --compression-method though. That\nwould cause compatibility issues, but this would make folks aware of\nthe presence of LZ4, which does not sound bad to me either as ZLIB is\nslower than LZ4 on all fronts.\n\n+ else if (compression_method == COMPRESSION_ZLIB)\n+ {\n+ pg_log_error(\"cannot use --compression-method gzip when \"\n+ \"--compression is 0\");\n+ fprintf(stderr, _(\"Try \\\"%s --help\\\" for more information.\\n\"),\n+ progname);\n+ exit(1);\n+ }\nHmm. It would be more natural to enforce a default compression level\nin this case? The user is asking for a compression with zlib here.\n\n+ my $lz4 = $ENV{LZ4};\n[...]\n+ # Verify that the stored file is readable if program lz4 is available\n+ skip \"program lz4 is not found in your system\", 1\n+ if (!defined $lz4 || $lz4 eq '');\nOkay, this is acceptable. Didn't know the existing trick with TAR\neither.\n\n+ /*\n+ * XXX: this is crap... lz4preferences now does show the uncompressed\n+ * size via lz4 --list <filename> but the compression goes down the\n+ * window... 
also it is not very helpfull to have it at the startm, does\n+ * it?\n+ */\nWhat do you mean here by \"the compression goes out the window\"?\n--\nMichael", "msg_date": "Fri, 9 Jul 2021 11:49:18 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Friday, July 9th, 2021 at 04:49, Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Thu, Jul 08, 2021 at 02:18:40PM +0000, gkokolatos@pm.me wrote:\n>\n> > please find v2 of the patch which tries to address the commends\n> >\n> > received so far.\n>\n> Thanks!\n>\n> > Michael Paquier wrote:\n> >\n> > > - system_or_bail('lz4', '-t', $lz4_wals[0]);\n> > >\n> > > I think that you should just drop this part of the test. The only\n> > >\n> > > part of LZ4 that we require to be present when Postgres is built with\n> > >\n> > > --with-lz4 is its library liblz4. Commands associated to it may not\n> > >\n> > > be around, causing this test to fail. The test checking that one .lz4\n> > >\n> > > file has been created is good to have. It may be worth adding a test\n> > >\n> > > with a .lz4.partial segment generated and --endpos pointing to a LSN\n> > >\n> > > that does not finish the segment that gets switched.\n> >\n> > I humbly disagree with the need for the test. It is rather easily possible\n> >\n> > to generate a file that can not be decoded, thus becoming useless. Having the\n> >\n> > test will provide some guarantee for the fact. In the current patch, there\n> >\n> > is code to find out if the program lz4 is available in the system. If it is\n> >\n> > not available, then that specific test is skipped. The rest remains as it\n> >\n> > were. Also `system_or_bail` is not used anymore in favour of the `system_log`\n> >\n> > so that the test counted and the execution of tests continues upon failure.\n>\n> Check. I can see what you are using in the new patch. 
We could live\n>\n> with that.\n\nGreat. Thank you.\n\n>\n> > > It seems to me that you are missing some logic in\n> > >\n> > > FindStreamingStart() to handle LZ4-compressed segments, in relation\n> > >\n> > > with IsCompressXLogFileName() and IsPartialCompressXLogFileName().\n> >\n> > Very correct. The logic is now added. Given the lz4 api, one has to fill\n> >\n> > in the uncompressed size during creation time. Then one can read the\n> >\n> > headers and verify the expectations.\n>\n> Yeah, I read that as well with lz4 --list and the kind. That's weird\n>\n> compared to how ZLIB gives an easy access to it. We may want to do an\n>\n> effort and tell about more lz4 --content-size/--list, telling that we\n>\n> add the size in the segment compressed because we have to and LZ4 does\n>\n> not do it by default?\n\nI am afraid I do not follow. In the patch we do add the uncompressed size\nand then, the uncompressed size is checked against a known value WalSegSz.\nWhat the compressed size will be checked against?\n\n>\n> > > Should we have more tests for ZLIB, while on it? That seems like a\n> > >\n> > > good addition as long as we can skip the tests conditionally when\n> > >\n> > > that's not supported.\n> >\n> > Agreed. Please allow me to provide a distinct patch for this code.\n>\n> Thanks. Looking forward to seeing it. That may be better on a\n>\n> separate thread, even if I digressed in this thread :)\n\nThank you for the comments. I will sent in a separate thread.\n\n>\n> > > I think we can somehow use \"acceleration\" parameter of lz4 compression\n> > >\n> > > to map on compression level, It is not direct mapping but\n> > >\n> > > can't we create some internal mapping instead of completely ignoring\n> > >\n> > > this option for lz4, or we can provide another option for lz4?\n> >\n> > We can, though I am not in favour of doing so. 
There is seemingly\n> >\n> > little benefit for added complexity.\n>\n> Agreed.\n>\n> > > What I think is important for the user when it comes to this\n> > >\n> > > option is the consistency of its naming across all the tools\n> > >\n> > > supporting it. pg_dump and pg_basebackup, where we could plug LZ4,\n> > >\n> > > already use most of the short options you could use for pg_receivewal,\n> > >\n> > > having only a long one gives a bit more flexibility.\n> >\n> > Done.\n>\n> * http://www.zlib.org/rfc-gzip.html.\n>\n> - - For lz4 compressed segments\n>\n> */\n>\n> This comment is incomplete.\n\nIt is. I will fix.\n\n>\n> +#define IsLZ4CompressXLogFileName(fname) \\\n> - (strlen(fname) == XLOG_FNAME_LEN + strlen(\".lz4\") && \\\n> - strspn(fname, \"0123456789ABCDEF\") == XLOG_FNAME_LEN && \\\n> - strcmp((fname) + XLOG_FNAME_LEN, \".lz4\") == 0)\n>\n> +#define IsLZ4PartialCompressXLogFileName(fname) \\\n> - (strlen(fname) == XLOG_FNAME_LEN + strlen(\".lz4.partial\") && \\\n> - strspn(fname, \"0123456789ABCDEF\") == XLOG_FNAME_LEN && \\\n> - strcmp((fname) + XLOG_FNAME_LEN, \".lz4.partial\") == 0)\n>\n> This is getting complicated. Would it be better to change this stuff\n>\n> and switch to a routine that checks if a segment has a valid name, is\n>\n> partial, and the type of compression that applied to it? It seems to\n>\n> me that we should group iszlibcompress and islz4compress together with\n>\n> the options available through compression_method.\n\nI agree with you. 
I will refactor.\n\n\n> - if (compresslevel != 0)\n> - {\n> - if (compression_method == COMPRESSION_NONE)\n>\n>\n> - {\n>\n>\n> - compression_method = COMPRESSION_ZLIB;\n>\n>\n> - }\n>\n>\n> - if (compression_method != COMPRESSION_ZLIB)\n>\n>\n> - {\n>\n>\n> - pg_log_error(\"cannot use --compress when \"\n>\n>\n> - \"--compression-method is not gzip\");\n>\n>\n> - fprintf(stderr, _(\"Try \\\\\"%s --help\\\\\" for more information.\\\\n\"),\n>\n>\n> - progname);\n>\n>\n> - exit(1);\n>\n>\n> - }\n>\n>\n> - }\n>\n> For compatibility where --compress enforces the use of zlib that would\n>\n> work, but this needs a comment explaining the goal of this block. I\n>\n> am wondering if it would be better to break the will and just complain\n>\n> when specifying --compress without --compression-method though. That\n>\n> would cause compatibility issues, but this would make folks aware of\n>\n> the presence of LZ4, which does not sound bad to me either as ZLIB is\n>\n> slower than LZ4 on all fronts.\n\n\nI would vote to break the compatibility if that is an option. I chose the\nless invasive approach thinking that breaking the compatibility would not\nbe an option.\n\nUnless others object, I will include --compress as a complimentary option\nto --compression-method in updated version of the patch.\n\n\n> - else if (compression_method == COMPRESSION_ZLIB)\n> - {\n> - pg_log_error(\"cannot use --compression-method gzip when \"\n>\n>\n> - \"--compression is 0\");\n>\n>\n> - fprintf(stderr, _(\"Try \\\\\"%s --help\\\\\" for more information.\\\\n\"),\n>\n>\n> - progname);\n>\n>\n> - exit(1);\n>\n>\n> - }\n>\n> Hmm. It would be more natural to enforce a default compression level\n>\n> in this case? The user is asking for a compression with zlib here.\n\n\nYou are correct, in the current patch passing --compression-method=gzip alone\nis equivalent to passing --compression=0 in the current master version. This\nbehaviour may be confusing for the user. 
What should the default compression\nbe then? I am inclined to say '5' as a compromise between speed and compression\nration.\n\n\n> - my $lz4 = $ENV{LZ4};\n>\n> [...]\n> - Verify that the stored file is readable if program lz4 is available\n> ===================================================================\n>\n> - skip \"program lz4 is not found in your system\", 1\n> - if (!defined $lz4 || $lz4 eq '');\n>\n>\n>\n> Okay, this is acceptable. Didn't know the existing trick with TAR\n>\n> either.\n\nThank you.\n\n>\n> - /*\n>\n>\n> - * XXX: this is crap... lz4preferences now does show the uncompressed\n>\n>\n> - * size via lz4 --list <filename> but the compression goes down the\n>\n>\n> - * window... also it is not very helpfull to have it at the startm, does\n>\n>\n> - * it?\n>\n>\n> - */\n>\n>\n>\n> What do you mean here by \"the compression goes out the window\"?\n\nPlease consider me adequately embarrassed. This was a personal comment while I was\nworking on the code. It is not correct and it should have never seen the public\nlight.\n\n\nCheers,\n//Georgios\n\n\n> ---------------------------------------------------------------\n>\n> Michael\n\n\n", "msg_date": "Fri, 09 Jul 2021 07:47:47 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Jul 8, 2021 at 7:48 PM <gkokolatos@pm.me> wrote:\n>\n> Dilip Kumar wrote:\n>\n> > Wouldn't it be better to call it compression method instead of\n> > compression program?\n>\n> Agreed. 
This is inline with the suggestions of other reviewers.\n> Find the change in the attached patch.\n\nThanks, I will have a look.\n\n> > I think we can somehow use \"acceleration\" parameter of lz4 compression\n> > to map on compression level, It is not direct mapping but\n> > can't we create some internal mapping instead of completely ignoring\n> > this option for lz4, or we can provide another option for lz4?\n>\n> We can, though I am not in favour of doing so. There is seemingly\n> little benefit for added complexity.\n\nI am really not sure what complexity you are talking about, do you\nmean since with pglz we were already providing the compression level\nso let it be as it is but with the new compression method you don't\nsee much benefit of providing compression level or speed?\n\n> > Should we also support LZ4 compression using dictionary?\n>\n> I would we should not do that. If my understanding is correct,\n> decompression would require the dictionary to be passed along.\n> The algorithm seems to be very competitive to the current compression\n> as is.\n\nI agree, we might not go for a dictionary because we would need to\ndictionary to decompress as well. So that will add an extra\ncomplexity for user.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 12 Jul 2021 11:10:24 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Mon, Jul 12, 2021 at 11:10:24AM +0530, Dilip Kumar wrote:\n> On Thu, Jul 8, 2021 at 7:48 PM <gkokolatos@pm.me> wrote:\n>> We can, though I am not in favour of doing so. 
There is seemingly\n>> little benefit for added complexity.\n> \n> I am really not sure what complexity you are talking about, do you\n> mean since with pglz we were already providing the compression level\n> so let it be as it is but with the new compression method you don't\n> see much benefit of providing compression level or speed?\n\nYou mean s/pglz/zlib/ here perhaps? I am not sure what Georgios has\nin mind, but my opinion stands on the latter: there is little benefit\nin making lz4 faster than the default and reduce compression, as the\ndefault is already a rather low CPU user.\n--\nMichael", "msg_date": "Mon, 12 Jul 2021 14:56:03 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Monday, July 12th, 2021 at 07:56, Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Mon, Jul 12, 2021 at 11:10:24AM +0530, Dilip Kumar wrote:\n>\n> > On Thu, Jul 8, 2021 at 7:48 PM gkokolatos@pm.me wrote:\n> >\n> > > We can, though I am not in favour of doing so. There is seemingly\n> > >\n> > > little benefit for added complexity.\n> >\n> > I am really not sure what complexity you are talking about, do you\n> >\n> > mean since with pglz we were already providing the compression level\n> >\n> > so let it be as it is but with the new compression method you don't\n> >\n> > see much benefit of providing compression level or speed?\n>\n> You mean s/pglz/zlib/ here perhaps? I am not sure what Georgios has\n>\n> in mind, but my opinion stands on the latter: there is little benefit\n>\n> in making lz4 faster than the default and reduce compression, as the\n>\n> default is already a rather low CPU user.\n\nThank you all for your comments. I am sitting on the same side as Micheal\non this one. 
The complexity is not huge, yet there will need to be code to\npass this option to the lz4 api and various test cases to verify for\ncorrectness and integrity. The burden of maintenance of this code vs the\nbenefit of the option, tilt the scale towards not including the option.\n\nOf course, I will happily provide whatever the community finds beneficial.\n\nCheers,\n//Georgios\n\n\n> -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n>\n> Michael\n\n\n", "msg_date": "Mon, 12 Jul 2021 09:33:44 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Mon, Jul 12, 2021 at 11:33 AM <gkokolatos@pm.me> wrote:\n>\n>\n>\n> ‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n>\n> On Monday, July 12th, 2021 at 07:56, Michael Paquier <michael@paquier.xyz> wrote:\n>\n> > On Mon, Jul 12, 2021 at 11:10:24AM +0530, Dilip Kumar wrote:\n> >\n> > > On Thu, Jul 8, 2021 at 7:48 PM gkokolatos@pm.me wrote:\n> > >\n> > > > We can, though I am not in favour of doing so. There is seemingly\n> > > >\n> > > > little benefit for added complexity.\n> > >\n> > > I am really not sure what complexity you are talking about, do you\n> > >\n> > > mean since with pglz we were already providing the compression level\n> > >\n> > > so let it be as it is but with the new compression method you don't\n> > >\n> > > see much benefit of providing compression level or speed?\n> >\n> > You mean s/pglz/zlib/ here perhaps? I am not sure what Georgios has\n> >\n> > in mind, but my opinion stands on the latter: there is little benefit\n> >\n> > in making lz4 faster than the default and reduce compression, as the\n> >\n> > default is already a rather low CPU user.\n>\n> Thank you all for your comments. 
I am sitting on the same side as Micheal\n> on this one. The complexity is not huge, yet there will need to be code to\n> pass this option to the lz4 api and various test cases to verify for\n> correctness and integrity. The burden of maintenance of this code vs the\n> benefit of the option, tilt the scale towards not including the option.\n\n+1 for skipping that one, at least for now, and sticking to\ndefault-only for lz4.\n\n-- \n Magnus Hagander\n Me: https://www.hagander.net/\n Work: https://www.redpill-linpro.com/\n\n\n", "msg_date": "Mon, 12 Jul 2021 11:45:29 +0200", "msg_from": "Magnus Hagander <magnus@hagander.net>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Mon, Jul 12, 2021 at 3:15 PM Magnus Hagander <magnus@hagander.net> wrote:\n>\n> On Mon, Jul 12, 2021 at 11:33 AM <gkokolatos@pm.me> wrote:\n> >\n> >\n> >\n> > ‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n> >\n> > On Monday, July 12th, 2021 at 07:56, Michael Paquier <michael@paquier.xyz> wrote:\n> >\n> > > On Mon, Jul 12, 2021 at 11:10:24AM +0530, Dilip Kumar wrote:\n> > >\n> > > > On Thu, Jul 8, 2021 at 7:48 PM gkokolatos@pm.me wrote:\n> > > >\n> > > > > We can, though I am not in favour of doing so. There is seemingly\n> > > > >\n> > > > > little benefit for added complexity.\n> > > >\n> > > > I am really not sure what complexity you are talking about, do you\n> > > >\n> > > > mean since with pglz we were already providing the compression level\n> > > >\n> > > > so let it be as it is but with the new compression method you don't\n> > > >\n> > > > see much benefit of providing compression level or speed?\n> > >\n> > > You mean s/pglz/zlib/ here perhaps? I am not sure what Georgios has\n> > >\n> > > in mind, but my opinion stands on the latter: there is little benefit\n> > >\n> > > in making lz4 faster than the default and reduce compression, as the\n> > >\n> > > default is already a rather low CPU user.\n> >\n> > Thank you all for your comments. 
I am sitting on the same side as Micheal\n> > on this one. The complexity is not huge, yet there will need to be code to\n> > pass this option to the lz4 api and various test cases to verify for\n> > correctness and integrity. The burden of maintenance of this code vs the\n> > benefit of the option, tilt the scale towards not including the option.\n>\n> +1 for skipping that one, at least for now, and sticking to\n> default-only for lz4.\n\nOkay, fine with me as well.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 12 Jul 2021 15:57:50 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Friday, July 9th, 2021 at 04:49, Michael Paquier <michael@paquier.xyz> wrote:\n\nHi,\n\nplease find v3 of the patch attached, rebased to the current head.\n\n> > Michael Paquier wrote:\n> >\n>\n> * http://www.zlib.org/rfc-gzip.html.\n>\n> - - For lz4 compressed segments\n> */\n> This comment is incomplete.\n\nFixed.\n\n> +#define IsLZ4CompressXLogFileName(fname) \\\n> - (strlen(fname) == XLOG_FNAME_LEN + strlen(\".lz4\") && \\\n> - strspn(fname, \"0123456789ABCDEF\") == XLOG_FNAME_LEN && \\\n> - strcmp((fname) + XLOG_FNAME_LEN, \".lz4\") == 0)\n>\n> +#define IsLZ4PartialCompressXLogFileName(fname) \\\n> - (strlen(fname) == XLOG_FNAME_LEN + strlen(\".lz4.partial\") && \\\n> - strspn(fname, \"0123456789ABCDEF\") == XLOG_FNAME_LEN && \\\n> - strcmp((fname) + XLOG_FNAME_LEN, \".lz4.partial\") == 0)\n>\n> This is getting complicated. Would it be better to change this stuff\n> and switch to a routine that checks if a segment has a valid name, is\n> partial, and the type of compression that applied to it? 
It seems to\n> me that we should group iszlibcompress and islz4compress together with\n> the options available through compression_method.\n\nAgreed and done.\n\n\n> - if (compresslevel != 0)\n> - {\n> - if (compression_method == COMPRESSION_NONE)\n> - {\n> - compression_method = COMPRESSION_ZLIB;\n> - }\n> - if (compression_method != COMPRESSION_ZLIB)\n> - {\n> - pg_log_error(\"cannot use --compress when \"\n> - \"--compression-method is not gzip\");\n> - fprintf(stderr, _(\"Try \\\\\"%s --help\\\\\" for more information.\\\\n\"),\n> - progname);\n> - exit(1);\n> - }\n> - }\n>\n> For compatibility where --compress enforces the use of zlib that would\n> work, but this needs a comment explaining the goal of this block. I\n> am wondering if it would be better to break the will and just complain\n> when specifying --compress without --compression-method though. That\n> would cause compatibility issues, but this would make folks aware of\n> the presence of LZ4, which does not sound bad to me either as ZLIB is\n> slower than LZ4 on all fronts.\n\nFair point. In v3 of the patch --compress requires --compression-method. Passing\n0 as value errors out.\n\n> - else if (compression_method == COMPRESSION_ZLIB)\n> - {\n> - pg_log_error(\"cannot use --compression-method gzip when \"\n> - \"--compression is 0\");\n> - fprintf(stderr, _(\"Try \\\\\"%s --help\\\\\" for more information.\\\\n\"),\n> - progname);\n> - exit(1);\n> - }\n>\n> Hmm. It would be more natural to enforce a default compression level\n> in this case? The user is asking for a compression with zlib here.\n\nAgreed. 
A default value of 5, which is in the middle point of options, has been\ndefined and used.\n\nIn addition, the tests have been adjusted to mimic the newly added gzip tests.\n\n\nCheers,\n//Georgios\n\n\n> ---------------------------------------------------------------\n>\n> Michael", "msg_date": "Fri, 10 Sep 2021 08:21:51 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "@@ -250,14 +302,18 @@ FindStreamingStart(uint32 *tli)\r\n \t\t/*\r\n \t\t * Check that the segment has the right size, if it's supposed to be\r\n \t\t * completed. For non-compressed segments just check the on-disk size\r\n-\t\t * and see if it matches a completed segment. For compressed segments,\r\n-\t\t * look at the last 4 bytes of the compressed file, which is where the\r\n-\t\t * uncompressed size is located for gz files with a size lower than\r\n-\t\t * 4GB, and then compare it to the size of a completed segment. The 4\r\n-\t\t * last bytes correspond to the ISIZE member according to\r\n+\t\t * and see if it matches a completed segment. 
For zlib compressed\r\n+\t\t * segments, look at the last 4 bytes of the compressed file, which is\r\n+\t\t * where the uncompressed size is located for gz files with a size lower\r\n+\t\t * than 4GB, and then compare it to the size of a completed segment.\r\n+\t\t * The 4 last bytes correspond to the ISIZE member according to\r\n \t\t * http://www.zlib.org/rfc-gzip.html.\r\n+\t\t *\r\n+\t\t * For lz4 compressed segments read the header using the exposed API and\r\n+\t\t * compare the uncompressed file size, stored in\r\n+\t\t * LZ4F_frameInfo_t{.contentSize}, to that of a completed segment.\r\n \t\t */\r\n-\t\tif (!ispartial && !iscompress)\r\n+\t\tif (!ispartial && wal_compression_method == COMPRESSION_NONE)\r\n \t\t{\r\n \t\t\tstruct stat statbuf;\r\n \t\t\tchar\t\tfullpath[MAXPGPATH * 2];\r\n@@ -276,7 +332,7 @@ FindStreamingStart(uint32 *tli)\r\n \t\t\t\tcontinue;\r\n \t\t\t}\r\n \t\t}\r\n-\t\telse if (!ispartial && iscompress)\r\n+\t\telse if (!ispartial && wal_compression_method == COMPRESSION_ZLIB)\r\n \t\t{\r\n \t\t\tint\t\t\tfd;\r\n \t\t\tchar\t\tbuf[4];\r\n@@ -322,6 +378,70 @@ FindStreamingStart(uint32 *tli)\r\n \t\t\t\tcontinue;\r\n \t\t\t}\r\n \t\t}\r\n+\t\telse if (!ispartial && compression_method == COMPRESSION_LZ4)\r\n+\t\t{\r\n+#ifdef HAVE_LIBLZ4\r\n+\t\t\tint\t\t\tfd;\r\n+\t\t\tint\t\t\tr;\r\n+\t\t\tsize_t\t\tconsumed_len = LZ4F_HEADER_SIZE_MAX;\r\n+\t\t\tchar\t buf[LZ4F_HEADER_SIZE_MAX];\r\n+\t\t\tchar\t\tfullpath[MAXPGPATH * 2];\r\n+\t\t\tLZ4F_frameInfo_t frame_info = { 0 };\r\n+\t\t\tLZ4F_decompressionContext_t ctx = NULL;\r\n+\r\n+\t\t\tsnprintf(fullpath, sizeof(fullpath), \"%s/%s\", basedir, dirent->d_name);\r\n+\r\n+\t\t\tfd = open(fullpath, O_RDONLY | PG_BINARY, 0);\r\n\r\nShould close the fd before exit or abort.", "msg_date": "Sat, 11 Sep 2021 05:02:42 +0000", "msg_from": "Jian Guo <gjian@vmware.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "‐‐‐‐‐‐‐ Original Message 
‐‐‐‐‐‐‐\n\nOn Saturday, September 11th, 2021 at 07:02, Jian Guo <gjian@vmware.com> wrote:\n\nHi,\n\nthank you for looking at the patch.\n\n> - \tLZ4F_decompressionContext_t ctx = NULL;\n> - \tsnprintf(fullpath, sizeof(fullpath), \"%s/%s\", basedir, dirent->d_name);\n> - \tfd = open(fullpath, O_RDONLY | PG_BINARY, 0);\n>\n> Should close the fd before exit or abort.\n\nYou are correct. Please find version 4 attached.\n\nCheers,\n//Georgios", "msg_date": "Mon, 13 Sep 2021 08:35:43 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Fri, Sep 10, 2021 at 08:21:51AM +0000, gkokolatos@pm.me wrote:\n> Agreed. A default value of 5, which is in the middle point of options, has been\n> defined and used.\n> \n> In addition, the tests have been adjusted to mimic the newly added gzip tests.\n\nLooking at lz4frame.h, there is LZ4F_flush() that allows to compress\nimmediately any data buffered in the frame context but not compressed\nyet. 
It seems to me that dir_sync() should be extended to support\nLZ4.\n\n export GZIP_PROGRAM=$(GZIP)\n+export LZ4\n[...]\n+PGAC_PATH_PROGS(LZ4, lz4)\n+\n PGAC_PATH_BISON\nThe part of the test assigning LZ4 is fine, but I'd rather switch to a\nlogic à-la-gzip, where we just save \"lz4\" in Makefile.global.in,\nsaving cycles in ./configure.\n\n+static bool\n+is_xlogfilename(const char *filename, bool *ispartial,\n+ WalCompressionMethod *wal_compression_method)\nI like the set of simplifications you have done here to detection if a\nsegment is partial and which compression method applies to it.\n\n+ if (compression_method != COMPRESSION_ZLIB && compresslevel != 0)\n+ {\n+ pg_log_error(\"can only use --compress together with \"\n+ \"--compression-method=gzip\");\n+#ifndef HAVE_LIBLZ4\n+ pg_log_error(\"this build does not support compression via gzip\");\n+#endif\n\ns/HAVE_LIBLZ4/HAVE_LIBZ/.\n\n+$primary->command_fails(\n+ [\n+ 'pg_receivewal', '-D', $stream_dir, '--compression-method', 'lz4',\n+ '--compress', '1'\n+ ],\n+ 'failure if --compression-method=lz4 specified with --compress');\n\nThis would fail when the code is not built with LZ4 with a non-zero\nerror code but with an error that is not what we expect. I think that\nyou should use $primary->command_fails_like() instead. That's quite\nnew, as of de1d4fe. The matching error pattern will need to change\ndepending on if we build the code with LZ4 or not. 
A simpler method\nis to use --compression-method=none, to bypass the first round of\nerrors and make that build-independent, but that feels incomplete if\nyou want to tie that to LZ4.\n\n+ pg_log_warning(\"compressed segment file \\\"%s\\\" has incorrect header size %lu, skipping\",\n+ dirent->d_name, consumed_len);\n+ LZ4F_freeDecompressionContext(ctx);\nI agree that skipping all those cases when calculating the streaming\nstart point is more consistent.\n\n+ if (r < 0)\n+ pg_log_error(\"could not read compressed file \\\"%s\\\": %m\",\n+ fullpath);\n+ else\n+ pg_log_error(\"could not read compressed file \\\"%s\\\": read %d of %lu\",\n+ fullpath, r, sizeof(buf));\nLet's same in translation effort here by just using \"could not read\",\netc. by removing the term \"compressed\".\n\n+ pg_log_error(\"can only use --compress together with \"\n+ \"--compression-method=gzip\");\nBetter to keep these in a single line to ease grepping. We don't care\nif error strings are longer than the 72-80 character limit.\n\n+/* Size of lz4 input chunk for .lz4 */\n+#define LZ4_IN_SIZE 4096\nWhy this choice? Does it need to use LZ4_COMPRESSBOUND?\n\n- if (dir_data->compression > 0)\n+ if (dir_data->compression_method == COMPRESSION_ZLIB)\n gzclose(gzfp);\n else\nHm. The addition of the header in dir_open_for_write() uses\nLZ4F_compressBegin. Shouldn't we use LZ4F_compressEnd() if\nfsync_fname() or fsync_parent_path() fail on top of closing the fd?\nThat would be more consistent IMO to do so. The patch does that in\ndir_close(). 
You should do that additionally if there is a failure\nwhen writing the header.\n\n+ pg_log_error(\"invalid compression-method \\\"%s\\\"\", optarg);\n+ exit(1);\nThis could be \"invalid value \\\"%s\\\" for option %s\", see\noption_parse_int() in fe_utils/option_utils.c.\n\nAfter running the TAP tests, the LZ4 section is failing as follows:\npg_receivewal: stopped log streaming at 0/4001950 (timeline 1)\npg_receivewal: not renaming \"000000010000000000000004.partial\", segment is not complete\npg_receivewal: error: could not close file \"000000010000000000000004\": Undefined error: 0\nok 26 - streaming some WAL using --compression-method=lz4\nThe third log line I am quoting here looks unexpected to me. Saying\nthat, the tests integrate nicely with the existing code.\n\nAs mentioned upthread, LZ4-compressed files don't store the file size\nby default. I think that we should document that better in the code\nand the documentation, in two ways at least:\n- Add some comments mentioning lz4 --content-size, with at least one\nin FindStreamingStart().\n- Add a new paragraph in the documentation of --compression-method.\n\nThe name of the compression method is \"LZ4\" with upper-case\ncharacters. Some comments in the code and the tests, as well as the\ndocs, are not careful about that.\n--\nMichael", "msg_date": "Wed, 15 Sep 2021 15:46:50 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Wednesday, September 15th, 2021 at 08:46, Michael Paquier michael@paquier.xyz wrote:\n\nHi,\n\nthank you for the review.\n\n> On Fri, Sep 10, 2021 at 08:21:51AM +0000, gkokolatos@pm.me wrote:\n>\n> > Agreed. 
A default value of 5, which is in the middle point of options, has been\n> > defined and used.\n> > In addition, the tests have been adjusted to mimic the newly added gzip tests.\n>\n> Looking at lz4frame.h, there is LZ4F_flush() that allows to compress\n> immediately any data buffered in the frame context but not compressed\n> yet. It seems to me that dir_sync() should be extended to support\n> LZ4.\n\nAgreed. LZ4F_flush() calls have been added where appropriate.\n\n>\n> export GZIP_PROGRAM=$(GZIP)\n> +export LZ4\n> [...]\n> +PGAC_PATH_PROGS(LZ4, lz4)\n> -\n> PGAC_PATH_BISON\n>\n> The part of the test assigning LZ4 is fine, but I'd rather switch to a\n> logic à-la-gzip, where we just save \"lz4\" in Makefile.global.in,\n> saving cycles in ./configure.\n\nReluctantly agreed.\n\n>\n> +static bool\n> +is_xlogfilename(const char *filename, bool *ispartial,\n> - WalCompressionMethod *wal_compression_method)\n>\n>\n> I like the set of simplifications you have done here to detection if a\n> segment is partial and which compression method applies to it.\n\nThank you very much.\n\n>\n> + if (compression_method != COMPRESSION_ZLIB && compresslevel != 0)\n> + {\n> + pg_log_error(\"can only use --compress together with \"\n> + \"--compression-method=gzip\");\n> +#ifndef HAVE_LIBLZ4\n> + pg_log_error(\"this build does not support compression via gzip\");\n> +#endif\n>\n> s/HAVE_LIBLZ4/HAVE_LIBZ/.\n>\n\nFixed.\n\n> +$primary->command_fails(\n> + [\n> + 'pg_receivewal', '-D', $stream_dir, '--compression-method', 'lz4',\n> + '--compress', '1'\n> + ],\n> + 'failure if --compression-method=lz4 specified with --compress');\n> This would fail when the code is not built with LZ4 with a non-zero\n> error code but with an error that is not what we expect. I think that\n> you should use $primary->command_fails_like() instead. That's quite\n> new, as of de1d4fe. The matching error pattern will need to change\n> depending on if we build the code with LZ4 or not. 
A simpler method\n> is to use --compression-method=none, to bypass the first round of\n> errors and make that build-independent, but that feels incomplete if\n> you want to tie that to LZ4.\n\nFixed. Now a regex has been added to address both builds.\n\n>\n> + pg_log_warning(\"compressed segment file \\\\\\\\\"%s\\\\\\\\\" has incorrect header size %lu, skipping\",\n> + dirent->d_name, consumed_len);\n> + LZ4F_freeDecompressionContext(ctx);\n>\n> I agree that skipping all those cases when calculating the streaming\n> start point is more consistent.\n\nThanks.\n\n>\n> + if (r < 0)\n> + pg_log_error(\"could not read compressed file \\\\\\\\\"%s\\\\\\\\\": %m\",\n> + fullpath);\n> + else\n> + pg_log_error(\"could not read compressed file \\\\\\\\\"%s\\\\\\\\\": read %d of %lu\",\n> + fullpath, r, sizeof(buf));\n>\n> Let's same in translation effort here by just using \"could not read\",\n> etc. by removing the term \"compressed\".\n\nThe string is also present in the gzip compressed case, i.e. prior to this patch.\nThe translation effort will not be reduced by changing this string only.\n\n> + pg_log_error(\"can only use --compress together with \"\n> + \"--compression-method=gzip\");\n>\n> Better to keep these in a single line to ease grepping. We don't care\n> if error strings are longer than the 72-80 character limit.\n\nFixed.\n\n> +/* Size of lz4 input chunk for .lz4 */\n> +#define LZ4_IN_SIZE 4096\n>\n> Why this choice? Does it need to use LZ4_COMPRESSBOUND?\n\nThis value is used in order to calculate the bound, before any buffer is\nreceived. Then when we receive buffer, we consume them in LZ4_IN_SIZE chunks.\nNote the call to LZ4F_compressBound() in dir_open_for_write().\n\n+ ctx_out = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION);\n+ lz4bufsize = LZ4F_compressBound(LZ4_IN_SIZE, &lz4preferences);\n\n\n> - if (dir_data->compression > 0)\n> + if (dir_data->compression_method == COMPRESSION_ZLIB)\n> gzclose(gzfp);\n> else\n>\n> Hm. 
The addition of the header in dir_open_for_write() uses\n> LZ4F_compressBegin. Shouldn't we use LZ4F_compressEnd() if\n> fsync_fname() or fsync_parent_path() fail on top of closing the fd?\n> That would be more consistent IMO to do so. The patch does that in\n> dir_close(). You should do that additionally if there is a failure\n> when writing the header.\n\nFixed. LZ4_flush() have been added where appropriate.\n\n>\n> + pg_log_error(\"invalid compression-method \\\"%s\\\", optarg);\n> + exit(1);\n>\n> This could be \"invalid value \\\"%s\\\" for option %s\", see\n> option_parse_int() in fe_utils/option_utils.c.\n\nFixed.\n\n>\n> After running the TAP tests, the LZ4 section is failing as follows:\n> pg_receivewal: stopped log streaming at 0/4001950 (timeline 1)\n> pg_receivewal: not renaming \"000000010000000000000004.partial\", segment is not complete\n> pg_receivewal: error: could not close file \"000000010000000000000004\": Undefined error: 0\n> ok 26 - streaming some WAL using --compression-method=lz4\n> The third log line I am quoting here looks unexpected to me. Saying\n> that, the tests integrate nicely with the existing code.\n\nStrange that you got an undefined error. I managed to _almost_ reproduce\nwith the log line looking like:\n\n pg_receivewal: error: could not close file \"000000010000000000000004\": Success\n\nThis was due to a call to LZ4F_compressEnd() on a partial file. In v5 of\nthe patch, LZ4F_compressEnd() is called when the WalCloseMethod is CLOSE_NORMAL\notherwise LZ4F_flush is used. This seems to remove the log line and a\nmore consistent behaviour overall.\n\nIn passing, close_walfile() has been taught to consider compression in\nthe filename, via get_file_name().\n\n> As mentioned upthread, LZ4-compressed files don't store the file size\n> by default. 
I think that we should document that better in the code\n> and the documentation, in two ways at least:\n>\n> - Add some comments mentioning lz4 --content-size, with at least one\n> in FindStreamingStart().\n> - Add a new paragraph in the documentation of --compression-method.\n\nApologies, I didn't understood what you meant upstream. Now I do.\n\nHow about:\n\nBy default, LZ4-compressed files don't store the uncompressed file size.\nHowever, the program pg_receivewal, does store that information. As a\nconsequence, the file does not need to be decompressed if the external\nprogram is used, e.g. lz4 -t --content-size <file>, will report the\nuncompressed file size.\n\n\n> The name of the compression method is \"LZ4\" with upper-case\n> characters. Some comments in the code and the tests, as well as the\n> docs, are not careful about that.\n\nHopefully fixed.\n\nCheers,\n//Georgios\n\n> --\n> Michael\n>", "msg_date": "Thu, 16 Sep 2021 15:17:15 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Sep 16, 2021 at 03:17:15PM +0000, gkokolatos@pm.me wrote:\n> Hopefully fixed.\n\nThanks for the new version. I have put my hands on the patch, and\nbegan reviewing its internals with LZ4. I am not done with it yet,\nand I have noticed some places that could be improved (error handling,\nsome uses of LZ4F_flush() that should be replaced LZ4F_compressEnd(),\nand more tweaks). 
I'll send an updated version once I complete my\nreview, but that looks rather solid overall.\n\nThe changes done in close_walfile()@receivelog.c are useful taken\nindependently, so I have applied these separately.\n--\nMichael", "msg_date": "Fri, 17 Sep 2021 16:39:41 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Friday, September 17th, 2021 at 09:39, Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Thu, Sep 16, 2021 at 03:17:15PM +0000, gkokolatos@pm.me wrote:\n>\n> > Hopefully fixed.\n>\n> Thanks for the new version. I have put my hands on the patch, and\n> began reviewing its internals with LZ4. I am not done with it yet,\n> and I have noticed some places that could be improved (error handling,\n> some uses of LZ4F_flush() that should be replaced LZ4F_compressEnd(),\n> and more tweaks). I'll send an updated version once I complete my\n> review, but that looks rather solid overall.\n\nThanks! Looking forward to seeing it!\n\n> The changes done in close_walfile()@receivelog.c are useful taken\n> independently, so I have applied these separately.\n\nYeah, I was considering it to split them over to a separate commit,\nthen decided against it. Will do so in the future.\n\nCheers,\n//Georgios\n\n> --------------------------------------------------------------------\n> Michael\n\n\n", "msg_date": "Fri, 17 Sep 2021 08:12:42 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Fri, Sep 17, 2021 at 08:12:42AM +0000, gkokolatos@pm.me wrote:\n> Yeah, I was considering it to split them over to a separate commit,\n> then decided against it. Will do so in the future.\n\nI have been digging into the issue I saw in the TAP tests when closing\na segment, and found the problem. 
The way you manipulate\nframeInfo.contentSize by just setting it to WalSegSz when *opening*\na segment causes problems on LZ4F_compressEnd(), making the code\nthrow a ERROR_frameSize_wrong. In lz4frame.c, the end of\nLZ4F_compressEnd() triggers this check and the error:\n if (cctxPtr->prefs.frameInfo.contentSize) {\n if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)\n return err0r(LZ4F_ERROR_frameSize_wrong);\n }\n\nWe don't really care about contentSize as long as a segment is not\ncompleted. Rather than filling contentSize all the time we write\nsomething, we'd better update frameInfo once the segment is\ncompleted and closed. That would also take take of the error as this\nis not checked if contentSize is 0. It seems to me that we should\nfill in the information when doing a CLOSE_NORMAL.\n\n- if (stream->walmethod->compression() == 0 &&\n+ if (stream->walmethod->compression() == COMPRESSION_NONE &&\n stream->walmethod->existsfile(fn))\nThis one was a more serious issue, as the compression() callback would\nreturn an integer for the compression level but v5 compared it to a\nWalCompressionMethod. In order to take care of this issue, mainly for\npg_basebackup, I think that we have to update the compression()\ncallback to compression_method(), and it is cleaner to save the\ncompression method as well as the compression level for the tar data.\n\nI am attaching a new patch, on which I have done many tweaks and\nadjustments while reviewing it. The attached patch fixes the second\nissue, and I have done nothing about the first issue yet, but that\nshould be simple enough to address as this needs an update of the\nframe info when closing a completed segment. 
Could you look at it?\n\nThanks,\n--\nMichael", "msg_date": "Sat, 18 Sep 2021 15:18:16 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Saturday, September 18th, 2021 at 8:18 AM, Michael Paquier <michael@paquier.xyz> wrote:\n> On Fri, Sep 17, 2021 at 08:12:42AM +0000, gkokolatos@pm.me wrote:\n>\n> I have been digging into the issue I saw in the TAP tests when closing\n> a segment, and found the problem. The way you manipulate\n> frameInfo.contentSize by just setting it to WalSegSz when *opening*\n> a segment causes problems on LZ4F_compressEnd(), making the code\n> throw a ERROR_frameSize_wrong. In lz4frame.c, the end of\n> LZ4F_compressEnd() triggers this check and the error:\n> if (cctxPtr->prefs.frameInfo.contentSize) {\n> if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)\n> return err0r(LZ4F_ERROR_frameSize_wrong);\n> }\n>\n> We don't really care about contentSize as long as a segment is not\n> completed. Rather than filling contentSize all the time we write\n> something, we'd better update frameInfo once the segment is\n> completed and closed. That would also take take of the error as this\n> is not checked if contentSize is 0. It seems to me that we should\n> fill in the information when doing a CLOSE_NORMAL.\n\nThank you for the comment. I think that the opposite should be done. At the time\nthat the file is closed, the header is already written to disk. We have no way\nto know that is not. If we need to go back to refill the information, we will\nhave to ask for the API to produce a new header. There is little guarantee that\nthe header size will be the same and as a consequence we will have to shift\nthe actual data around.\n\nIn the attached, the header is rewritten only when closing an incomplete\nsegment. For all intents and purposes that segment is not usable. 
However there\nmight be custom scripts that might want to attempt to parse even an otherwise\nunusable file.\n\nA different and easier approach would be to simply prepare the LZ4 context for\nfuture actions and simply ignore the file.\n\n>\n> - if (stream->walmethod->compression() == 0 &&\n> + if (stream->walmethod->compression() == COMPRESSION_NONE &&\n> stream->walmethod->existsfile(fn))\n> This one was a more serious issue, as the compression() callback would\n> return an integer for the compression level but v5 compared it to a\n> WalCompressionMethod. In order to take care of this issue, mainly for\n> pg_basebackup, I think that we have to update the compression()\n> callback to compression_method(), and it is cleaner to save the\n> compression method as well as the compression level for the tar data.\n>\n\nAgreed.\n\n> I am attaching a new patch, on which I have done many tweaks and\n> adjustments while reviewing it. The attached patch fixes the second\n> issue, and I have done nothing about the first issue yet, but that\n> should be simple enough to address as this needs an update of the\n> frame info when closing a completed segment. Could you look at it?\n>\n\nThank you. Find v7 attached, rebased to the current head.\n\nCheers,\n//Georgios\n\n> Thanks,\n> --\n> Michae", "msg_date": "Fri, 29 Oct 2021 09:45:41 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Fri, Oct 29, 2021 at 09:45:41AM +0000, gkokolatos@pm.me wrote:\n> On Saturday, September 18th, 2021 at 8:18 AM, Michael Paquier <michael@paquier.xyz> wrote:\n>> We don't really care about contentSize as long as a segment is not\n>> completed. Rather than filling contentSize all the time we write\n>> something, we'd better update frameInfo once the segment is\n>> completed and closed. That would also take take of the error as this\n>> is not checked if contentSize is 0. 
It seems to me that we should\n>> fill in the information when doing a CLOSE_NORMAL.\n> \n> Thank you for the comment. I think that the opposite should be done. At the time\n> that the file is closed, the header is already written to disk. We have no way\n> to know that is not. If we need to go back to refill the information, we will\n> have to ask for the API to produce a new header. There is little guarantee that\n> the header size will be the same and as a consequence we will have to shift\n> the actual data around.\n\nWhy would the header size change between the moment the segment is\nbegun and it is finished? We could store it in memory and write it\nagain when the segment is closed instead, even if it means to fseek()\nback to the beginning of the file once the segment is completed.\nStoring WalSegSz from the moment a segment is opened makes the code\nweaker to SIGINTs and the kind, so this does not fix the problem I\nmentioned previously :/\n\n> In the attached, the header is rewritten only when closing an incomplete\n> segment. For all intents and purposes that segment is not usable. However there\n> might be custom scripts that might want to attempt to parse even an otherwise\n> unusable file.\n> \n> A different and easier approach would be to simply prepare the LZ4 context for\n> future actions and simply ignore the file.\n\nI am not sure what you mean by \"ignore\" here. Do you mean to store 0\nin contentSize when opening the segment and rewriting again the header\nonce the segment is completed?\n--\nMichael", "msg_date": "Fri, 29 Oct 2021 20:38:33 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Fri, Oct 29, 2021 at 08:38:33PM +0900, Michael Paquier wrote:\n> Why would the header size change between the moment the segment is\n> begun and it is finished? 
We could store it in memory and write it\n> again when the segment is closed instead, even if it means to fseek()\n> back to the beginning of the file once the segment is completed.\n> Storing WalSegSz from the moment a segment is opened makes the code\n> weaker to SIGINTs and the kind, so this does not fix the problem I\n> mentioned previously :/\n\nI got to think more on this one, and another argument against storing\nan incorrect contentSize while the segment is not completed would\nbreak the case of partial segments with --synchronous, where we should\nstill be able to recover as much data flushed as possible. Like zlib,\nwhere one has to complete the partial segment with zeros after\ndecompression until the WAL segment size is reached, we should be able\nto support that with LZ4. (I have saved some customer data in the\npast thanks to this property, btw.)\n\nIt is proves to be too fancy to rewrite the header with a correct\ncontentSize once the segment is completed, another way would be to\nenforce a decompression of each segment in-memory. The advantage of\nthis method is that we would be a maximum portable. For example, if\none begins to use pg_receivewal on an archive directory where we used\nan archive_command, we would be able to grab the starting LSN. That's\nmore costly of course, but the LZ4 protocol does not make that easy\neither with its chunk protocol. By the way, you are right that we\nshould worry about the variability in size of the header as we only\nhave the guarantee that it can be within a give window. I missed\nthat and lz4frame.h mentions that around LZ4F_headerSize :/\n\nIt would be good to test with many segments, but could we think about\njust relying on LZ4F_decompress() with a frame and compute the\ndecompressed size by ourselves? 
At least that will never break, and\nthat would work in all the cases aimed by pg_receivewal.\n--\nMichael", "msg_date": "Mon, 1 Nov 2021 17:09:08 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Monday, November 1st, 2021 at 9:09 AM, Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Fri, Oct 29, 2021 at 08:38:33PM +0900, Michael Paquier wrote:\n>\n> It would be good to test with many segments, but could we think about\n> just relying on LZ4F_decompress() with a frame and compute the\n> decompressed size by ourselves? At least that will never break, and\n> that would work in all the cases aimed by pg_receivewal.\n\nAgreed.\n\nI have already started on v8 of the patch with that technique. I should\nbe able to update the thread soon.\n\n>\n> Michael\n\n\n", "msg_date": "Mon, 01 Nov 2021 08:39:59 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Mon, Nov 01, 2021 at 08:39:59AM +0000, gkokolatos@pm.me wrote:\n> Agreed.\n> \n> I have already started on v8 of the patch with that technique. I should\n> be able to update the thread soon.\n\nNice, thanks!\n--\nMichael", "msg_date": "Tue, 2 Nov 2021 07:27:50 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Tue, Nov 02, 2021 at 07:27:50AM +0900, Michael Paquier wrote:\n> On Mon, Nov 01, 2021 at 08:39:59AM +0000, gkokolatos@pm.me wrote:\n> > Agreed.\n> > \n> > I have already started on v8 of the patch with that technique. 
I should\n> > be able to update the thread soon.\n> \n> Nice, thanks!\n\nBy the way, I was reading the last version of the patch today, and\nit seems to me that we could make the commit history if we split the\npatch into two parts:\n- One that introduces the new option --compression-method and\nis_xlogfilename(), while reworking --compress (including documentation\nchanges).\n- One to have LZ4 support.\n\nv7 has been using \"gzip\" for the option name, but I think that it\nwould be more consistent to use \"zlib\" instead.\n--\nMichael", "msg_date": "Tue, 2 Nov 2021 17:51:45 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Tuesday, November 2nd, 2021 at 9:51 AM, Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Tue, Nov 02, 2021 at 07:27:50AM +0900, Michael Paquier wrote:\n> > On Mon, Nov 01, 2021 at 08:39:59AM +0000, gkokolatos@pm.me wrote:\n> > > Agreed.\n> > >\n> > > I have already started on v8 of the patch with that technique. I should\n> > > be able to update the thread soon.\n> >\n> > Nice, thanks!\n>\n\nA pleasure. 
Please find it in the attached v8-0002 patch.\n\n> By the way, I was reading the last version of the patch today, and\n> it seems to me that we could make the commit history if we split the\n> patch into two parts:\n> - One that introduces the new option --compression-method and\n> is_xlogfilename(), while reworking --compress (including documentation\n> changes).\n> - One to have LZ4 support.\n\nAgreed.\n\n>\n> v7 has been using \"gzip\" for the option name, but I think that it\n> would be more consistent to use \"zlib\" instead.\n\nDone.\n\nCheers,\n//Georgios\n\n> --\n> Michael", "msg_date": "Tue, 02 Nov 2021 11:36:49 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Tue, Nov 2, 2021 at 9:51 AM Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Tue, Nov 02, 2021 at 07:27:50AM +0900, Michael Paquier wrote:\n> > On Mon, Nov 01, 2021 at 08:39:59AM +0000, gkokolatos@pm.me wrote:\n> > > Agreed.\n> > >\n> > > I have already started on v8 of the patch with that technique. I should\n> > > be able to update the thread soon.\n> >\n> > Nice, thanks!\n>\n> By the way, I was reading the last version of the patch today, and\n> it seems to me that we could make the commit history if we split the\n> patch into two parts:\n> - One that introduces the new option --compression-method and\n> is_xlogfilename(), while reworking --compress (including documentation\n> changes).\n> - One to have LZ4 support.\n>\n> v7 has been using \"gzip\" for the option name, but I think that it\n> would be more consistent to use \"zlib\" instead.\n>\n\nUm, why?\n\nThat we are using zlib to provide the compression is an implementation\ndetail. Whereas AFAIK \"gzip\" refers to both the program and the format. 
And\nwe specifically use the gzxxx() functions in zlib, in order to produce gzip\nformat.\n\nI think for the end user, it is strictly better to name it \"gzip\", and\ngiven that the target of this option is the end user we should do so. (It'd\nbe different it we were talking about a build-time parameter to configure).\n\n-- \n Magnus Hagander\n Me: https://www.hagander.net/ <http://www.hagander.net/>\n Work: https://www.redpill-linpro.com/ <http://www.redpill-linpro.com/>\n\nOn Tue, Nov 2, 2021 at 9:51 AM Michael Paquier <michael@paquier.xyz> wrote:On Tue, Nov 02, 2021 at 07:27:50AM +0900, Michael Paquier wrote:\n> On Mon, Nov 01, 2021 at 08:39:59AM +0000, gkokolatos@pm.me wrote:\n> > Agreed.\n> > \n> > I have already started on v8 of the patch with that technique. I should\n> > be able to update the thread soon.\n> \n> Nice, thanks!\n\nBy the way, I was reading the last version of the patch today, and\nit seems to me that we could make the commit history if we split the\npatch into two parts:\n- One that introduces the new option --compression-method and\nis_xlogfilename(), while reworking --compress (including documentation\nchanges).\n- One to have LZ4 support.\n\nv7 has been using \"gzip\" for the option name, but I think that it\nwould be more consistent to use \"zlib\" instead.Um, why?That we are using zlib to provide the compression is an implementation detail. Whereas AFAIK \"gzip\" refers to both the program and the format. And we specifically use the gzxxx() functions in zlib, in order to produce gzip format.I think for the end user, it is strictly better to name it \"gzip\", and given that the target of this option is the end user we should do so. 
(It'd be different it we were talking about a build-time parameter to configure).--  Magnus Hagander Me: https://www.hagander.net/ Work: https://www.redpill-linpro.com/", "msg_date": "Tue, 2 Nov 2021 13:17:05 +0100", "msg_from": "Magnus Hagander <magnus@hagander.net>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Tue, Nov 2, 2021 at 8:17 AM Magnus Hagander <magnus@hagander.net> wrote:\n> Um, why?\n>\n> That we are using zlib to provide the compression is an implementation detail. Whereas AFAIK \"gzip\" refers to both the program and the format. And we specifically use the gzxxx() functions in zlib, in order to produce gzip format.\n>\n> I think for the end user, it is strictly better to name it \"gzip\", and given that the target of this option is the end user we should do so. (It'd be different it we were talking about a build-time parameter to configure).\n\nI agree. Also, I think there's actually a file format called \"zlib\"\nwhich is slightly different from the \"gzip\" format, and you have to be\ncareful not to generate the wrong one.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 2 Nov 2021 12:31:47 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Tue, Nov 02, 2021 at 12:31:47PM -0400, Robert Haas wrote:\n> On Tue, Nov 2, 2021 at 8:17 AM Magnus Hagander <magnus@hagander.net> wrote:\n>> I think for the end user, it is strictly better to name it \"gzip\",\n>> and given that the target of this option is the end user we should\n>> do so. (It'd be different it we were talking about a build-time\n>> parameter to configure). \n> \n> I agree. Also, I think there's actually a file format called \"zlib\"\n> which is slightly different from the \"gzip\" format, and you have to be\n> careful not to generate the wrong one.\n\nOkay, fine by me. 
It would be better to be also consistent in\nWalCompressionMethods once we switch to this option value, then.\n--\nMichael", "msg_date": "Wed, 3 Nov 2021 08:23:46 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Wednesday, November 3rd, 2021 at 12:23 AM, Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Tue, Nov 02, 2021 at 12:31:47PM -0400, Robert Haas wrote:\n>> On Tue, Nov 2, 2021 at 8:17 AM Magnus Hagander magnus@hagander.net wrote:\n>>> I think for the end user, it is strictly better to name it \"gzip\",\n>>> and given that the target of this option is the end user we should\n>>> do so. (It'd be different it we were talking about a build-time\n>>> parameter to configure).\n>>\n>> I agree. Also, I think there's actually a file format called \"zlib\"\n>> which is slightly different from the \"gzip\" format, and you have to be\n>> careful not to generate the wrong one.\n>\n> Okay, fine by me. It would be better to be also consistent in\n> WalCompressionMethods once we switch to this option value, then.\n\nI will revert to gzip for version 9. 
Should be out shortly.\n\nCheers,\n//Georgios\n>\n> Michael\n\n\n", "msg_date": "Wed, 03 Nov 2021 08:05:53 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Wednesday, November 3rd, 2021 at 9:05 AM, <gkokolatos@pm.me> wrote:\n\n> ‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n>\n> On Wednesday, November 3rd, 2021 at 12:23 AM, Michael Paquier michael@paquier.xyz wrote:\n> > On Tue, Nov 02, 2021 at 12:31:47PM -0400, Robert Haas wrote:\n> > > On Tue, Nov 2, 2021 at 8:17 AM Magnus Hagander magnus@hagander.net wrote:\n> > >\n> > > > I think for the end user, it is strictly better to name it \"gzip\",\n> > > > and given that the target of this option is the end user we should\n> > > > do so. (It'd be different it we were talking about a build-time\n> > > > parameter to configure).\n> > >\n> > > I agree. Also, I think there's actually a file format called \"zlib\"\n> > > which is slightly different from the \"gzip\" format, and you have to be\n> > > careful not to generate the wrong one.\n> >\n> > Okay, fine by me. It would be better to be also consistent in\n> > WalCompressionMethods once we switch to this option value, then.\n>\n> I will revert to gzip for version 9. Should be out shortly.\n\nPlease find v9 attached.\n\nCheers,\n//Georgios\n>\n> > Michael", "msg_date": "Wed, 03 Nov 2021 09:11:24 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Wed, Nov 03, 2021 at 09:11:24AM +0000, gkokolatos@pm.me wrote:\n> Please find v9 attached.\n\nThanks. I have looked at 0001 today, and applied it after fixing a\ncouple of issues. 
From memory:\n- zlib.h was missing from pg_receivewal.c, issue that I noticed after\nremoving the redefinition of Z_DEFAULT_COMPRESSION because there was\nno need for it (did a run with a --without-zlib as well).\n- Simplified a bit the error handling for incorrect option\ncombinations, using a switch/case while on it.\n- Renamed the existing variable \"compression\" in walmethods.c to\ncompression_level, to reduce any confusion with the introduction of\ncompression_method. One thing I have noticed is about the tar method,\nwhere we rely on the compression level to decide if compression should\nbe used or not. There should be some simplifications possible there\nbut there is a huge take in receivelog.c where we use COMPRESSION_NONE\nto track down that we still want to zero a new segment when using tar\nmethod.\n- Use of 'I' as short option name, err... After applying the first\nbatch..\n\nBased on the work of 0001, there were some conflicts with 0002. I\nhave solved them while reviewing it, and adapted the code to what got\nalready applied.\n\n+ header_size = LZ4F_compressBegin(ctx, lz4buf, lz4bufsize, NULL);\n+ if (LZ4F_isError(header_size))\n+ {\n+ pg_free(lz4buf);\n+ close(fd);\n+ return NULL;\n+ }\nIn dir_open_for_write(), I guess that this one is missing one\nLZ4F_freeCompressionContext().\n\n+ status = LZ4F_freeDecompressionContext(ctx);\n+ if (LZ4F_isError(status))\n+ {\n+ pg_log_error(\"could not free LZ4 decompression context: %s\",\n+ LZ4F_getErrorName(status));\n+ exit(1);\n+ }\n+\n+ if (uncompressed_size != WalSegSz)\n+ {\n+ pg_log_warning(\"compressed segment file \\\"%s\\\" has\nincorrect uncompressed size %ld, skipping\",\n+ dirent->d_name, uncompressed_size);\n+ (void) LZ4F_freeDecompressionContext(ctx);\n+ continue;\n+ }\nWhen the uncompressed size does not match out expected size, the\nsecond LZ4F_freeDecompressionContext() looks unnecessary to me because\nwe have already one a couple of lines above.\n\n+ ctx_out = 
LZ4F_createCompressionContext(&ctx, LZ4F_VERSION);\n+ lz4bufsize = LZ4F_compressBound(LZ4_IN_SIZE, NULL);\n+ if (LZ4F_isError(ctx_out))\n+ {\n+ close(fd);\n+ return NULL;\n+ }\nLZ4F_compressBound() can be after the check on ctx_out, here.\n\n+ while (1)\n+ {\n+ char *readp;\n+ char *readend;\nSimply looping when decompressing a segment to check its size looks\nrather unsafe to me. We should leave the loop once uncompressed_size\nis strictly more than WalSegSz.\n\nThe amount of TAP tests looks fine, and that's consistent with what we\ndo for zlib^D^D^D^Dgzip. Now, when testing manually pg_receivewal\nwith various combinations of gzip, lz4 and none, I can see the\nfollowing failure in the code that calculates the streaming start\npoint:\npg_receivewal: error: could not decompress file\n\"wals//000000010000000000000006.lz4\": ERROR_frameType_unknown\n\nIn the LZ4 code, this points to lib/lz4frame.c, where we read an\nincorrect header (see the part that does not match LZ4F_MAGICNUMBER).\nThe segments written by pg_receivewal are clean. Please note that\nthis shows up as well when manually compressing some segments with a\nsimple lz4 command, to simulate for example the case where a user\ncompressed some segments by himself/herself before running\npg_receivewal.\n\nSo, tour code does LZ4F_createDecompressionContext() followed by a\nloop on read() and LZ4F_decompress() that relies on an input and an\noutput buffer of a fixed 4kB size (we could use 64kB at least here \nactually?). So this set of loops looks rather correct to me.\n\nNow, this part is weird:\n+ while (readp < readend)\n+ {\n+ size_t read_size = 1;\n+ size_t out_size = 1;\n\nI would have expected read_size to be (readend - readp) to match with\nthe remaining data in the read buffer that we still need to read.\nShouldn't out_size also be LZ4_CHUNK_SZ to match with the size of the\noutput buffer where all the contents are read? 
By setting it to 1, I\nthink that this is doing more loops into LZ4F_decompress() than really\nnecessary. It would not hurt either to memset(0) those buffers before\nthey are used, IMO. I am not completely sure either, but should we\nuse the number of bytes returned by LZ4F_decompress() as a hint for\nthe follow-up loops?\n\nAttached is an updated patch, which includes fixes for most of the\nissues I am mentioning above. Please note that I have not changed\nFindStreamingStart(), so this part is the same as v9.\n\nThanks,\n--\nMichael", "msg_date": "Thu, 4 Nov 2021 16:31:48 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Nov 04, 2021 at 04:31:48PM +0900, Michael Paquier wrote:\n> I would have expected read_size to be (readend - readp) to match with\n> the remaining data in the read buffer that we still need to read.\n> Shouldn't out_size also be LZ4_CHUNK_SZ to match with the size of the\n> output buffer where all the contents are read? By setting it to 1, I\n> think that this is doing more loops into LZ4F_decompress() than really\n> necessary. It would not hurt either to memset(0) those buffers before\n> they are used, IMO. I am not completely sure either, but should we\n> use the number of bytes returned by LZ4F_decompress() as a hint for\n> the follow-up loops?\n>\n> +#ifdef HAVE_LIBLZ4\n> + while (readp < readend)\n> + {\n> + size_t read_size = 1;\n> + size_t out_size = 1;\n> +\n> + status = LZ4F_decompress(ctx, outbuf, &out_size,\n> + readbuf, &read_size, NULL);\n\nAnd... It happens that the error from v9 is here, where we need to\nread the amount of remaining data from \"readp\", and not \"readbuf\" :)\n\nIt is already late here, I'll continue on this stuff tomorrow, but\nthis looks rather committable overall. 
\n--\nMichael", "msg_date": "Thu, 4 Nov 2021 17:21:53 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Thursday, November 4th, 2021 at 9:21 AM, Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Thu, Nov 04, 2021 at 04:31:48PM +0900, Michael Paquier wrote:\n> Thanks. I have looked at 0001 today, and applied it after fixing a\n> couple of issues.\n\nGreat! Thank you very much.\n\n> From memory:\n> - zlib.h was missing from pg_receivewal.c, issue that I noticed after\n> removing the redefinition of Z_DEFAULT_COMPRESSION because there was\n> no need for it (did a run with a --without-zlib as well).\n\nYeah, I simply wanted to avoid adding a header. Either way works really.\n\n> - Simplified a bit the error handling for incorrect option\n> combinations, using a switch/case while on it.\n\nMuch cleaner done this way.\n\n> - Renamed the existing variable \"compression\" in walmethods.c to\n> compression_level, to reduce any confusion with the introduction of\n> compression_method. One thing I have noticed is about the tar method,\n> where we rely on the compression level to decide if compression should\n> be used or not. There should be some simplifications possible there\n> but there is a huge take in receivelog.c where we use COMPRESSION_NONE\n> to track down that we still want to zero a new segment when using tar\n> method.\n\nAgreed.\n\n> - Use of 'I' as short option name, err... After applying the first\n> batch..\n\nI left that in just to have the two compression related options next to each\nother when switching. I assumed it might help with readability for the next\ndeveloper looking at it.\n\nRemoving it, is cleaner for the option definifion though, thanks.\n\n>\n> Based on the work of 0001, there were some conflicts with 0002. 
I\n> have solved them while reviewing it, and adapted the code to what got\n> already applied.\n\nThank you very much.\n\n>\n> + header_size = LZ4F_compressBegin(ctx, lz4buf, lz4bufsize, NULL);\n> + if (LZ4F_isError(header_size))\n> + {\n> + pg_free(lz4buf);\n> + close(fd);\n> + return NULL;\n> + }\n> In dir_open_for_write(), I guess that this one is missing one\n> LZ4F_freeCompressionContext().\n\nAgreed.\n\n>\n> + status = LZ4F_freeDecompressionContext(ctx);\n> + if (LZ4F_isError(status))\n> + {\n> + pg_log_error(\"could not free LZ4 decompression context: %s\",\n> + LZ4F_getErrorName(status));\n> + exit(1);\n> + }\n> +\n> + if (uncompressed_size != WalSegSz)\n> + {\n> + pg_log_warning(\"compressed segment file \\\"%s\\\" has\n> incorrect uncompressed size %ld, skipping\",\n> + dirent->d_name, uncompressed_size);\n> + (void) LZ4F_freeDecompressionContext(ctx);\n> + continue;\n> + }\n> When the uncompressed size does not match out expected size, the\n> second LZ4F_freeDecompressionContext() looks unnecessary to me because\n> we have already one a couple of lines above.\n\nAgreed.\n\n>\n> + ctx_out = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION);\n> + lz4bufsize = LZ4F_compressBound(LZ4_IN_SIZE, NULL);\n> + if (LZ4F_isError(ctx_out))\n> + {\n> + close(fd);\n> + return NULL;\n> + }\n> LZ4F_compressBound() can be after the check on ctx_out, here.\n>\n> + while (1)\n> + {\n> + char *readp;\n> + char *readend;\n> Simply looping when decompressing a segment to check its size looks\n> rather unsafe to me. 
We should leave the loop once uncompressed_size\n> is strictly more than WalSegSz.\n\nThe loop exits when done reading or when it failed to read:\n\n+ r = read(fd, readbuf, sizeof(readbuf));\n+ if (r < 0)\n+ {\n+ pg_log_error(\"could not read file \\\"%s\\\": %m\", fullpath);\n+ exit(1);\n+ }\n+\n+ /* Done reading */\n+ if (r == 0)\n+ break;\n\nAlthough I do agree that it can exit before that, if the uncompressed size is\ngreater than WalSegSz.\n\n>\n> The amount of TAP tests looks fine, and that's consistent with what we\n> do for zlib^D^D^D^Dgzip. Now, when testing manually pg_receivewal\n> with various combinations of gzip, lz4 and none, I can see the\n> following failure in the code that calculates the streaming start\n> point:\n> pg_receivewal: error: could not decompress file\n> \"wals//000000010000000000000006.lz4\": ERROR_frameType_unknown\n>\n\nHmmm.... I will look into that.\n\n> In the LZ4 code, this points to lib/lz4frame.c, where we read an\n> incorrect header (see the part that does not match LZ4F_MAGICNUMBER).\n> The segments written by pg_receivewal are clean. Please note that\n> this shows up as well when manually compressing some segments with a\n> simple lz4 command, to simulate for example the case where a user\n> compressed some segments by himself/herself before running\n> pg_receivewal.\n>\n\nRights, thank you for investigating. I will look further.\n\n> So, tour code does LZ4F_createDecompressionContext() followed by a\n> loop on read() and LZ4F_decompress() that relies on an input and an\n> output buffer of a fixed 4kB size (we could use 64kB at least here\n> actually?). So this set of loops looks rather correct to me.\n>\n\nFor what is worth, in a stand alone program I wrote while investigating, I did\nnot notice any noteworthy performance gain, when decompressing files of original\nsize similar to common WalSegSz values, using 4kB, 8kB, 16kB and 32kB buffers.\nI did not try 64kB though. 
This was by no means exhaustive performance testing,\nthough good enough to propose a value. I chose 4kB because it is small enough to\nhave in the stack. I thought anything bigger should be heap alloced and that\nwould add a bit more distraction in the code with the pg_free() calls.\n\nI will re-write to use 64kB in the heap.\n\n> Now, this part is weird:\n> + while (readp < readend)\n> + {\n> + size_t read_size = 1;\n> + size_t out_size = 1;\n>\n> I would have expected read_size to be (readend - readp) to match with\n> the remaining data in the read buffer that we still need to read.\n> Shouldn't out_size also be LZ4_CHUNK_SZ to match with the size of the\n> output buffer where all the contents are read? By setting it to 1, I\n> think that this is doing more loops into LZ4F_decompress() than really\n> necessary.\n\nYou are very correct. An oversight when moving code over from my program and\nrenaming variables. Consider me embarrassed.\n\n> It would not hurt either to memset(0) those buffers before\n> they are used, IMO.\n\nIt does not hurt, yet I do not think that is necessary because one buffer is\nthrow away, i.e. the program writes to it but we never read it, and the other is\noverwritten during the read call.\n\n> I am not completely sure either, but should we\n> use the number of bytes returned by LZ4F_decompress() as a hint for\n> the follow-up loops?\n\nIt is possible, though in my humble opinion it adds some code and has no\nmeasurable effect in the code.\n\n> > + status = LZ4F_decompress(ctx, outbuf, &out_size,\n> > + readbuf, &read_size, NULL);\n\n> And... It happens that the error from v9 is here, where we need to\n> read the amount of remaining data from \"readp\", and not \"readbuf\" :)\n\nAgreed.\n\nI really suck at renaming all the things... I am really embarrassed.\n\n>\n> Attached is an updated patch, which includes fixes for most of the\n> issues I am mentioning above. 
Please note that I have not changed\n> FindStreamingStart(), so this part is the same as v9.\n\nThanks!\n\nCheers,\n//Georgios\n\n>\n> Thanks,\n> --\n> Michael\n\n\n", "msg_date": "Thu, 04 Nov 2021 09:53:32 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Thursday, November 4th, 2021 at 9:21 AM, Michael Paquier <michael@paquier.xyz> wrote:\n> > +#ifdef HAVE_LIBLZ4\n> > + while (readp < readend)\n> > + {\n> > + size_t read_size = 1;\n> > + size_t out_size = 1;\n> > +\n> > + status = LZ4F_decompress(ctx, outbuf, &out_size,\n> > + readbuf, &read_size, NULL);\n>\n> And... It happens that the error from v9 is here, where we need to\n> read the amount of remaining data from \"readp\", and not \"readbuf\" :)\n>\n> It is already late here, I'll continue on this stuff tomorrow, but\n> this looks rather committable overall.\n\nThank you for v11 of the patch. Please find attached v12 which addresses a few\nminor points.\n\nAdded an Oxford comma since the list now contains three or more terms:\n- <option>--with-lz4</option>) and <literal>none</literal>.\n+ <option>--with-lz4</option>), and <literal>none</literal>.\n\nRemoved an extra condinional check while switching over compression_method.\nInstead of:\n + case COMPRESSION_LZ4:\n +#ifdef HAVE_LIBLZ4\n + if (compresslevel != 0)\n + {\n + pg_log_error(\"cannot use --compress with\n --compression-method=%s\",\n + \"lz4\");\n + fprintf(stderr, _(\"Try \\\"%s --help\\\" for more information.\\n\"),\n + progname);\n + exit(1);\n + }\n +#else\n + if (compression_method == COMPRESSION_LZ4)\n + {\n + pg_log_error(\"this build does not support compression with %s\",\n + \"LZ4\");\n + exit(1);\n + }\n + break;\n +#endif\n\nI opted for:\n + case COMPRESSION_LZ4:\n +#ifdef HAVE_LIBLZ4\n + if (compresslevel != 0)\n + {\n + pg_log_error(\"cannot use --compress with\n --compression-method=%s\",\n + 
\"lz4\");\n + fprintf(stderr, _(\"Try \\\"%s --help\\\" for more information.\\n\"),\n + progname);\n + exit(1);\n + }\n +#else\n + pg_log_error(\"this build does not support compression with %s\",\n + \"LZ4\");\n + exit(1);\n + #endif\n\nThere was an error while trying to find the streaming start. The code read:\n+ else if (!ispartial && compression_method == COMPRESSION_LZ4)\n\nwhere it should be instead:\n+ else if (!ispartial && wal_compression_method == COMPRESSION_LZ4)\n\nbecause compression_method is the global option exposed to the whereas\nwal_compression_method is the local variable used to figure out what kind of\nfile the function is currently working with. This error was existing at least in\nv9-0002 of $subject.\n\nThe variables readbuf and outbuf, used in the decompression of LZ4 files, are\nnow heap allocated.\n\nLast, while the following is correct:\n+ /*\n+ * Once we have read enough data to cover one segment, we are\n+ * done, there is no need to do more.\n+ */\n+ while (uncompressed_size <= WalSegSz)\n\nI felt that converting it a do {} while () loop instead, will help with\nreadability:\n+ do\n+ {\n<snip>\n+ /*\n+ * No need to continue reading the file when the uncompressed_size\n+ * exceeds WalSegSz, even if there are still data left to read.\n+ * However, if uncompressed_size is equal to WalSegSz, it should\n+ * verify that there is no more data to read.\n+ */\n+ } while (r > 0 && uncompressed_size <= WalSegSz);\n\nof course the check:\n+ /* Done reading the file */\n+ if (r == 0)\n+ break;\nmidway the loop is no longer needed and thus removed.\n\nI would like to have a bit more test coverage in the case for FindStreamingStart().\nSpecifically for the case that a lz4-compressed segment larger than WalSegSz exists.\nThe attached patch does not contain such test case. 
I am not very certain on how to\ncreate such a test case reliably as it would be mostly based on a warning emitted\nduring the parsing of such a file.\n\nCheers,\n//Georgios\n\n> --\n> Michael", "msg_date": "Thu, 04 Nov 2021 17:02:28 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Nov 04, 2021 at 05:02:28PM +0000, gkokolatos@pm.me wrote:\n> Removed an extra condinional check while switching over compression_method.\n\nIndeed. My rebase was a bit sloppy here.\n\n> because compression_method is the global option exposed to the whereas\n> wal_compression_method is the local variable used to figure out what kind of\n> file the function is currently working with. This error was existing at least in\n> v9-0002 of $subject.\n\nRight.\n\n> I felt that converting it a do {} while () loop instead, will help with\n> readability:\n> + do\n> + {\n> <snip>\n> + /*\n> + * No need to continue reading the file when the uncompressed_size\n> + * exceeds WalSegSz, even if there are still data left to read.\n> + * However, if uncompressed_size is equal to WalSegSz, it should\n> + * verify that there is no more data to read.\n> + */\n> + } while (r > 0 && uncompressed_size <= WalSegSz);\n\nNo objections from me to do that. This makes the code a bit easier to\nfollow, indeed.\n\n> I would like to have a bit more test coverage in the case for FindStreamingStart().\n> Specifically for the case that a lz4-compressed segment larger than WalSegSz exists.\n\nThe same could be said for gzip. I am not sure that this is worth the\nextra I/O and pg_receivewal commands, though.\n\nI have spent an extra couple of hours staring at the code, and the\nwhole looked fine, so applied. 
While on it, I have tested the new TAP\ntests with all the possible combinations of --without-zlib and\n--with-lz4.\n--\nMichael", "msg_date": "Fri, 5 Nov 2021 11:47:20 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Friday, November 5th, 2021 at 3:47 AM, Michael Paquier <michael@paquier.xyz> wrote:\n\n>\n> I have spent an extra couple of hours staring at the code, and the\n> whole looked fine, so applied. While on it, I have tested the new TAP\n> tests with all the possible combinations of --without-zlib and\n> --with-lz4.\n\nGreat news. Thank you very much.\n\nCheers,\n//Georgios\n\n> --\n> Michael\n\n\n", "msg_date": "Fri, 05 Nov 2021 07:50:52 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "In dir_open_for_write() I observe that we are writing the header\nand then calling LZ4F_compressEnd() in case there is an error\nwhile writing the buffer to the file, and the output buffer of\nLZ4F_compressEnd() is not written anywhere. Why should this be\nnecessary? To flush the contents of the internal buffer? But, then we\nare calling LZ4F_freeCompressionContext() immediately after the\nLZ4F_compressEnd() call. I might be missing something, will be\nhappy to get more insights.\n\nRegards,\nJeevan Ladhe\n\nOn Fri, Nov 5, 2021 at 1:21 PM <gkokolatos@pm.me> wrote:\n\n>\n>\n> ‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n>\n> On Friday, November 5th, 2021 at 3:47 AM, Michael Paquier <\n> michael@paquier.xyz> wrote:\n>\n> >\n> > I have spent an extra couple of hours staring at the code, and the\n> > whole looked fine, so applied. While on it, I have tested the new TAP\n> > tests with all the possible combinations of --without-zlib and\n> > --with-lz4.\n>\n> Great news. 
Thank you very much.\n>\n> Cheers,\n> //Georgios\n>\n> > --\n> > Michael\n>\n>\n>\n\nIn dir_open_for_write() I observe that we are writing the headerand then calling LZ4F_compressEnd() in case there is an errorwhile writing the buffer to the file, and the output buffer ofLZ4F_compressEnd() is not written anywhere. Why should this benecessary? To flush the contents of the internal buffer? But, then weare calling LZ4F_freeCompressionContext() immediately after theLZ4F_compressEnd() call. I might be missing something, will behappy to get more insights.Regards,Jeevan LadheOn Fri, Nov 5, 2021 at 1:21 PM <gkokolatos@pm.me> wrote:\n\n‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Friday, November 5th, 2021 at 3:47 AM, Michael Paquier <michael@paquier.xyz> wrote:\n\n>\n> I have spent an extra couple of hours staring at the code, and the\n> whole looked fine, so applied. While on it, I have tested the new TAP\n> tests with all the possible combinations of --without-zlib and\n> --with-lz4.\n\nGreat news. Thank you very much.\n\nCheers,\n//Georgios\n\n> --\n> Michael", "msg_date": "Thu, 18 Nov 2021 19:54:37 +0530", "msg_from": "Jeevan Ladhe <jeevan.ladhe@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Nov 18, 2021 at 07:54:37PM +0530, Jeevan Ladhe wrote:\n> In dir_open_for_write() I observe that we are writing the header\n> and then calling LZ4F_compressEnd() in case there is an error\n> while writing the buffer to the file, and the output buffer of\n> LZ4F_compressEnd() is not written anywhere. Why should this be\n> necessary? To flush the contents of the internal buffer? But, then we\n> are calling LZ4F_freeCompressionContext() immediately after the\n> LZ4F_compressEnd() call. 
I might be missing something, will be\n> happy to get more insights.\n\nMy concern here was symmetry, where IMO it makes sense to have a\ncompressEnd call each time there is a successful compressBegin call\ndone for the LZ4 state data, as there is no way to know if in the\nfuture LZ4 won't change some of its internals to do more than just an\ninternal flush.\n--\nMichael", "msg_date": "Fri, 19 Nov 2021 11:07:40 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "‐‐‐‐‐‐‐ Original Message ‐‐‐‐‐‐‐\n\nOn Friday, November 19th, 2021 at 3:07 AM, Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Thu, Nov 18, 2021 at 07:54:37PM +0530, Jeevan Ladhe wrote:\n>\n> > In dir_open_for_write() I observe that we are writing the header\n> > and then calling LZ4F_compressEnd() in case there is an error\n> > while writing the buffer to the file, and the output buffer of\n> > LZ4F_compressEnd() is not written anywhere. Why should this be\n> > necessary? To flush the contents of the internal buffer? But, then we\n> > are calling LZ4F_freeCompressionContext() immediately after the\n> > LZ4F_compressEnd() call. I might be missing something, will be\n> > happy to get more insights.\n>\n> My concern here was symmetry, where IMO it makes sense to have a\n> compressEnd call each time there is a successful compressBegin call\n> done for the LZ4 state data, as there is no way to know if in the\n> future LZ4 won't change some of its internals to do more than just an\n> internal flush.\n\nAgreed.\n\nAlthough the library does provide an interface for simply flushing contents, it\nalso assumes that each initializing call will have a finilizing call. 
If my\nmemory serves me right, earlier versions of the patch, did not have this\nsummetry, but that got ammended.\n\nCheers,\n//Georgios\n\n> ---\n> Michael\n\n\n\n", "msg_date": "Fri, 19 Nov 2021 10:07:50 +0000", "msg_from": "gkokolatos@pm.me", "msg_from_op": true, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Fri, Nov 19, 2021 at 7:37 AM Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Thu, Nov 18, 2021 at 07:54:37PM +0530, Jeevan Ladhe wrote:\n> > In dir_open_for_write() I observe that we are writing the header\n> > and then calling LZ4F_compressEnd() in case there is an error\n> > while writing the buffer to the file, and the output buffer of\n> > LZ4F_compressEnd() is not written anywhere. Why should this be\n> > necessary? To flush the contents of the internal buffer? But, then we\n> > are calling LZ4F_freeCompressionContext() immediately after the\n> > LZ4F_compressEnd() call. I might be missing something, will be\n> > happy to get more insights.\n>\n> My concern here was symmetry, where IMO it makes sense to have a\n> compressEnd call each time there is a successful compressBegin call\n> done for the LZ4 state data, as there is no way to know if in the\n> future LZ4 won't change some of its internals to do more than just an\n> internal flush.\n>\n\nFair enough. But, still I have a doubt in mind what benefit would that\nreally bring to us here, because we are immediately also freeing the\nlz4buf without using it anywhere.\n\nRegards,\nJeevan\n\nOn Fri, Nov 19, 2021 at 7:37 AM Michael Paquier <michael@paquier.xyz> wrote:On Thu, Nov 18, 2021 at 07:54:37PM +0530, Jeevan Ladhe wrote:\n> In dir_open_for_write() I observe that we are writing the header\n> and then calling LZ4F_compressEnd() in case there is an error\n> while writing the buffer to the file, and the output buffer of\n> LZ4F_compressEnd() is not written anywhere. Why should this be\n> necessary? To flush the contents of the internal buffer? 
But, then we\n> are calling LZ4F_freeCompressionContext() immediately after the\n> LZ4F_compressEnd() call. I might be missing something, will be\n> happy to get more insights.\n\nMy concern here was symmetry, where IMO it makes sense to have a\ncompressEnd call each time there is a successful compressBegin call\ndone for the LZ4 state data, as there is no way to know if in the\nfuture LZ4 won't change some of its internals to do more than just an\ninternal flush.Fair enough. But, still I have a doubt in mind what benefit would thatreally bring to us here, because we are immediately also freeing thelz4buf without using it anywhere.Regards,Jeevan", "msg_date": "Mon, 22 Nov 2021 11:15:55 +0530", "msg_from": "Jeevan Ladhe <jeevan.ladhe@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Mon, Nov 22, 2021 at 12:46 AM Jeevan Ladhe\n<jeevan.ladhe@enterprisedb.com> wrote:\n> Fair enough. But, still I have a doubt in mind what benefit would that\n> really bring to us here, because we are immediately also freeing the\n> lz4buf without using it anywhere.\n\nYeah, I'm also doubtful about that. If we're freeng the compression\ncontext, we shouldn't need to guarantee that it's in any particular\nstate before doing so. Why would any critical cleanup be part of\nLZ4F_compressEnd() rather than LZ4F_freeCompressionContext()? 
The\npoint of LZ4F_compressEnd() is to make sure all of the output bytes\nget written, and it would be stupid to force people to write the\noutput bytes even when they've decided that they no longer care about\nthem due to some error.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 22 Nov 2021 09:02:47 -0500", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Mon, Nov 22, 2021 at 09:02:47AM -0500, Robert Haas wrote:\n> On Mon, Nov 22, 2021 at 12:46 AM Jeevan Ladhe\n> <jeevan.ladhe@enterprisedb.com> wrote:\n>> Fair enough. But, still I have a doubt in mind what benefit would that\n>> really bring to us here, because we are immediately also freeing the\n>> lz4buf without using it anywhere.\n> \n> Yeah, I'm also doubtful about that. If we're freeng the compression\n> context, we shouldn't need to guarantee that it's in any particular\n> state before doing so. Why would any critical cleanup be part of\n> LZ4F_compressEnd() rather than LZ4F_freeCompressionContext()? The\n> point of LZ4F_compressEnd() is to make sure all of the output bytes\n> get written, and it would be stupid to force people to write the\n> output bytes even when they've decided that they no longer care about\n> them due to some error.\n\nHmm. I have double-checked all that, and I agree that we could just\nskip LZ4F_compressEnd() in this error code path. 
From what I can see\nin the upstream code, what we have now is not broken either, but the\ncompressEnd() call does some work that's not needed here.\n--\nMichael", "msg_date": "Wed, 24 Nov 2021 14:25:52 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Wed, Nov 24, 2021 at 10:55 AM Michael Paquier <michael@paquier.xyz>\nwrote:\n\n> On Mon, Nov 22, 2021 at 09:02:47AM -0500, Robert Haas wrote:\n> > On Mon, Nov 22, 2021 at 12:46 AM Jeevan Ladhe\n> > <jeevan.ladhe@enterprisedb.com> wrote:\n> >> Fair enough. But, still I have a doubt in mind what benefit would that\n> >> really bring to us here, because we are immediately also freeing the\n> >> lz4buf without using it anywhere.\n> >\n> > Yeah, I'm also doubtful about that. If we're freeng the compression\n> > context, we shouldn't need to guarantee that it's in any particular\n> > state before doing so. Why would any critical cleanup be part of\n> > LZ4F_compressEnd() rather than LZ4F_freeCompressionContext()? The\n> > point of LZ4F_compressEnd() is to make sure all of the output bytes\n> > get written, and it would be stupid to force people to write the\n> > output bytes even when they've decided that they no longer care about\n> > them due to some error.\n>\n> Hmm. I have double-checked all that, and I agree that we could just\n> skip LZ4F_compressEnd() in this error code path. From what I can see\n> in the upstream code, what we have now is not broken either, but the\n> compressEnd() call does some work that's not needed here.\n\n\nYes I agree that we are not broken, but as you said we are doing some\nan extra bit of work here.\n\nRegards,\nJeevan Ladhe\n\nOn Wed, Nov 24, 2021 at 10:55 AM Michael Paquier <michael@paquier.xyz> wrote:On Mon, Nov 22, 2021 at 09:02:47AM -0500, Robert Haas wrote:\n> On Mon, Nov 22, 2021 at 12:46 AM Jeevan Ladhe\n> <jeevan.ladhe@enterprisedb.com> wrote:\n>> Fair enough. 
But, still I have a doubt in mind what benefit would that\n>> really bring to us here, because we are immediately also freeing the\n>> lz4buf without using it anywhere.\n> \n> Yeah, I'm also doubtful about that. If we're freeng the compression\n> context, we shouldn't need to guarantee that it's in any particular\n> state before doing so. Why would any critical cleanup be part of\n> LZ4F_compressEnd() rather than LZ4F_freeCompressionContext()? The\n> point of LZ4F_compressEnd() is to make sure all of the output bytes\n> get written, and it would be stupid to force people to write the\n> output bytes even when they've decided that they no longer care about\n> them due to some error.\n\nHmm.  I have double-checked all that, and I agree that we could just\nskip LZ4F_compressEnd() in this error code path.  From what I can see\nin the upstream code, what we have now is not broken either, but the\ncompressEnd() call does some work that's not needed here.Yes I agree that we are not broken, but as you said we are doing somean extra bit of work here.Regards,Jeevan Ladhe", "msg_date": "Wed, 24 Nov 2021 17:24:16 +0530", "msg_from": "Jeevan Ladhe <jeevan.ladhe@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Nov 4, 2021 at 10:47 PM Michael Paquier <michael@paquier.xyz> wrote:\n> Indeed. My rebase was a bit sloppy here.\n\nHi!\n\nOver in http://postgr.es/m/CA+TgmoYUDEJga2qV_XbAZ=pGEBaOsgFmzZ6Ac4_sRwOm_+UeHA@mail.gmail.com\nI was noticing that CreateWalTarMethod doesn't support LZ4\ncompression. It would be nice if it did. I thought maybe the patch on\nthis thread would fix that, but I think maybe it doesn't, because it\nlooks like that's touching the WalDirectoryMethod part of that file,\nrather than the WalTarMethod part. Is that correct? 
And, on a related\nnote, Michael, do you plan to get something committed here?\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 11 Feb 2022 10:07:49 -0500", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Fri, Feb 11, 2022 at 10:07:49AM -0500, Robert Haas wrote:\n> Over in http://postgr.es/m/CA+TgmoYUDEJga2qV_XbAZ=pGEBaOsgFmzZ6Ac4_sRwOm_+UeHA@mail.gmail.com\n> I was noticing that CreateWalTarMethod doesn't support LZ4\n> compression. It would be nice if it did. I thought maybe the patch on\n> this thread would fix that, but I think maybe it doesn't, because it\n> looks like that's touching the WalDirectoryMethod part of that file,\n> rather than the WalTarMethod part. Is that correct?\n\nCorrect. pg_receivewal only cares about the directory method, so this\nthread was limited to this part. Yes, it would be nice to extend\nfully the tar method of walmethods.c to support LZ4, but I was not\nsure what needed to be done, and I am still not sure based on what has\njust been done as of 751b8d23.\n\n> And, on a related note, Michael, do you plan to get something\n> committed here? \n\nApart from f79962d, babbbb5 and 50e1441, I don't think that there was\nsomething left to do for this thread. Perhaps I am missing something?\n--\nMichael", "msg_date": "Sat, 12 Feb 2022 12:52:40 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Fri, Feb 11, 2022 at 10:52 PM Michael Paquier <michael@paquier.xyz> wrote:\n> > And, on a related note, Michael, do you plan to get something\n> > committed here?\n>\n> Apart from f79962d, babbbb5 and 50e1441, I don't think that there was\n> something left to do for this thread. Perhaps I am missing something?\n\nOh, my mistake. 
I didn't realize you'd already committed it.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 14 Feb 2022 15:07:46 -0500", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Sat, Feb 12, 2022 at 12:52:40PM +0900, Michael Paquier wrote:\n> On Fri, Feb 11, 2022 at 10:07:49AM -0500, Robert Haas wrote:\n> > Over in http://postgr.es/m/CA+TgmoYUDEJga2qV_XbAZ=pGEBaOsgFmzZ6Ac4_sRwOm_+UeHA@mail.gmail.com\n> > I was noticing that CreateWalTarMethod doesn't support LZ4\n> > compression. It would be nice if it did. I thought maybe the patch on\n> > this thread would fix that, but I think maybe it doesn't, because it\n> > looks like that's touching the WalDirectoryMethod part of that file,\n> > rather than the WalTarMethod part. Is that correct?\n> \n> Correct. pg_receivewal only cares about the directory method, so this\n> thread was limited to this part. Yes, it would be nice to extend\n> fully the tar method of walmethods.c to support LZ4, but I was not\n> sure what needed to be done, and I am still not sure based on what has\n> just been done as of 751b8d23.\n> \n> > And, on a related note, Michael, do you plan to get something\n> > committed here? \n> \n> Apart from f79962d, babbbb5 and 50e1441, I don't think that there was\n> something left to do for this thread. 
Perhaps I am missing something?\n\nI think this should use <lz4frame.h>\n\n+#include \"lz4frame.h\"\n\ncommit babbbb595d2322da095a1e6703171b3f1f2815cb\nAuthor: Michael Paquier <michael@paquier.xyz>\nDate: Fri Nov 5 11:33:25 2021 +0900\n\n Add support for LZ4 compression in pg_receivewal\n\n-- \nJustin\n\n\n", "msg_date": "Thu, 17 Mar 2022 06:12:20 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" }, { "msg_contents": "On Thu, Mar 17, 2022 at 06:12:20AM -0500, Justin Pryzby wrote:\n> I think this should use <lz4frame.h>\n> \n> +#include \"lz4frame.h\"\n> \n> commit babbbb595d2322da095a1e6703171b3f1f2815cb\n> Author: Michael Paquier <michael@paquier.xyz>\n> Date: Fri Nov 5 11:33:25 2021 +0900\n> \n> Add support for LZ4 compression in pg_receivewal\n\nYes, you are right. A second thing is that should be declared before\nthe PG headers.\n--\nMichael", "msg_date": "Fri, 18 Mar 2022 10:38:54 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Teach pg_receivewal to use lz4 compression" } ]
[ { "msg_contents": "When specifying NUMERIC(precision, scale) the scale is constrained to\nthe range [0, precision], which is per SQL spec. However, at least one\nother major database vendor intentionally does not impose this\nrestriction, since allowing scales outside this range can be useful.\n\nA negative scale implies rounding before the decimal point. For\nexample, a column declared as NUMERIC(3,-3) rounds values to the\nnearest thousand, and can hold values up to 999000.\n\n(Note that the display scale remains non-negative, so all digits\nbefore the decimal point are displayed, and none of the internals of\nnumeric.c need to worry about negative dscale values. Only the scale\nin the typemod is negative.)\n\nA scale greater than the precision constrains the value to be less\nthan 0.1. For example, a column declared as NUMERIC(3,6) can hold\n\"micro\" quantities up to 0.000999.\n\nAttached is a WIP patch supporting this.\n\nRegards,\nDean", "msg_date": "Tue, 29 Jun 2021 20:58:38 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "WIP: Relaxing the constraints on numeric scale" }, { "msg_contents": "On Tue, Jun 29, 2021 at 3:58 PM Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> When specifying NUMERIC(precision, scale) the scale is constrained to\n> the range [0, precision], which is per SQL spec. However, at least one\n> other major database vendor intentionally does not impose this\n> restriction, since allowing scales outside this range can be useful.\n\nI thought about this too, but\nhttp://postgr.es/m/774767.1591985683@sss.pgh.pa.us made me think that\nit would be an on-disk format break. 
Maybe it's not, though?\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 29 Jun 2021 16:34:32 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: WIP: Relaxing the constraints on numeric scale" }, { "msg_contents": "On Tue, 29 Jun 2021 at 21:34, Robert Haas <robertmhaas@gmail.com> wrote:\n>\n> I thought about this too, but\n> http://postgr.es/m/774767.1591985683@sss.pgh.pa.us made me think that\n> it would be an on-disk format break. Maybe it's not, though?\n>\n\nNo, because the numeric dscale remains non-negative, so there's no\nchange to the way numeric values are stored. The only change is to\nextend the allowed scale in the numeric typemod.\n\nRegards,\nDean\n\n\n", "msg_date": "Tue, 29 Jun 2021 21:45:52 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: WIP: Relaxing the constraints on numeric scale" }, { "msg_contents": "Robert Haas <robertmhaas@gmail.com> writes:\n> On Tue, Jun 29, 2021 at 3:58 PM Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n>> When specifying NUMERIC(precision, scale) the scale is constrained to\n>> the range [0, precision], which is per SQL spec. However, at least one\n>> other major database vendor intentionally does not impose this\n>> restriction, since allowing scales outside this range can be useful.\n\n> I thought about this too, but\n> http://postgr.es/m/774767.1591985683@sss.pgh.pa.us made me think that\n> it would be an on-disk format break. Maybe it's not, though?\n\nSee further down in that thread --- I don't think there's actually\na need for negative dscale on-disk. 
However, there remains the question\nof whether any external code knows enough about numeric typmods to become\nconfused by a negative scale field within those.\n\nAfter reflecting for a bit, I suspect the answer is \"probably\", but\nit seems like it wouldn't be much worse of an update than any number\nof other catalog changes we make every release.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 29 Jun 2021 16:55:32 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: WIP: Relaxing the constraints on numeric scale" }, { "msg_contents": "On Tue, Jun 29, 2021 at 4:46 PM Dean Rasheed <dean.a.rasheed@gmail.com> wrote:\n> On Tue, 29 Jun 2021 at 21:34, Robert Haas <robertmhaas@gmail.com> wrote:\n> > I thought about this too, but\n> > http://postgr.es/m/774767.1591985683@sss.pgh.pa.us made me think that\n> > it would be an on-disk format break. Maybe it's not, though?\n>\n> No, because the numeric dscale remains non-negative, so there's no\n> change to the way numeric values are stored. The only change is to\n> extend the allowed scale in the numeric typemod.\n\nAh! Well, in that case, this sounds great.\n\n(I haven't looked at the patch, so this is just an endorsement of the concept.)\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 29 Jun 2021 17:00:51 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: WIP: Relaxing the constraints on numeric scale" }, { "msg_contents": "Attached is a more complete patch, with updated docs and tests.\n\nI chose to allow the scale to be in the range -1000 to 1000, which, to\nsome extent, is quite arbitrary. The upper limit of 1000 makes sense,\nbecause nearly all numeric computations (other than multiply, add and\nsubtract) have that as their upper scale limit (that's the maximum\ndisplay scale). 
It also has to be at least 1000 for SQL compliance,\nsince the precision can be up to 1000.\n\nThe lower limit, on the other hand, really is quite arbitrary. -1000\nis a nice round number, giving it a certain symmetry, and is almost\ncertainly sufficient for any realistic use case (-1000 means numbers\nare rounded to the nearest multiple of 10^1000).\n\nAlso, keeping some spare bits in the typemod might come in handy one\nday for something else (e.g., rounding mode choice).\n\nRegards,\nDean", "msg_date": "Sat, 3 Jul 2021 11:14:37 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: WIP: Relaxing the constraints on numeric scale" }, { "msg_contents": "Dean Rasheed <dean.a.rasheed@gmail.com> writes:\n> Attached is a more complete patch, with updated docs and tests.\n\nI took a brief look at this and have a couple of quick suggestions:\n\n* As you mention, keeping some spare bits in the typmod might come\nin handy some day, but as given this patch isn't really doing so.\nI think it might be advisable to mask the scale off at 11 bits,\npreserving the high 5 bits of the low-order half of the word for future\nuse. The main objection to that I guess is that it would complicate\ndoing sign extension in TYPMOD_SCALE(). But it doesn't seem like we\nuse that logic in any really hot code paths, so another instruction\nor three probably is not much of a cost.\n\n* I agree with wrapping the typmod construction/extraction into macros\n(or maybe they should be inline functions?) but the names you chose\nseem generic enough to possibly confuse onlookers. I'd suggest\nchanging TYPMOD to NUMERIC_TYPMOD or NUM_TYPMOD. 
The comment for them\nshould probably also explicitly explain \"For purely historical reasons,\nVARHDRSZ is added to the typmod value after these fields are combined\",\nor words to that effect.\n\n* It might be advisable to write NUMERIC_MIN_SCALE with parens:\n\n#define NUMERIC_MIN_SCALE\t\t\t(-1000)\n\nto avoid any precedence gotchas.\n\n* I'd be inclined to leave the num_typemod_test table in place,\nrather than dropping it, so that it serves to exercise pg_dump\nfor these cases during the pg_upgrade test.\n\nHaven't read the code in detail yet.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 21 Jul 2021 17:33:19 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: WIP: Relaxing the constraints on numeric scale" }, { "msg_contents": "On Wed, 21 Jul 2021 at 22:33, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> I took a brief look at this and have a couple of quick suggestions:\n>\n\nThanks for looking at this!\n\n> * As you mention, keeping some spare bits in the typmod might come\n> in handy some day, but as given this patch isn't really doing so.\n> I think it might be advisable to mask the scale off at 11 bits,\n> preserving the high 5 bits of the low-order half of the word for future\n> use. The main objection to that I guess is that it would complicate\n> doing sign extension in TYPMOD_SCALE(). But it doesn't seem like we\n> use that logic in any really hot code paths, so another instruction\n> or three probably is not much of a cost.\n>\n\nYeah, that makes sense, and it's worth documenting where the spare bits are.\n\nInterestingly, gcc recognised the bit hack I used for sign extension\nand turned it into (x << 21) >> 21 using x86 shl and sar instructions,\nthough I didn't write it that way because apparently that's not\nportable.\n\n> * I agree with wrapping the typmod construction/extraction into macros\n> (or maybe they should be inline functions?) 
but the names you chose\n> seem generic enough to possibly confuse onlookers. I'd suggest\n> changing TYPMOD to NUMERIC_TYPMOD or NUM_TYPMOD. The comment for them\n> should probably also explicitly explain \"For purely historical reasons,\n> VARHDRSZ is added to the typmod value after these fields are combined\",\n> or words to that effect.\n>\n\nI've turned them into inline functions, since that makes them easier\nto read, and debug if necessary.\n\nAll your other suggestions make sense too. Attached is a new version.\n\nRegards,\nDean", "msg_date": "Thu, 22 Jul 2021 17:06:55 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: WIP: Relaxing the constraints on numeric scale" }, { "msg_contents": "Dean Rasheed <dean.a.rasheed@gmail.com> writes:\n> All your other suggestions make sense too. Attached is a new version.\n\nOK, I've now studied this more closely, and have some additional\nnitpicks:\n\n* I felt the way you did the documentation was confusing. 
It seems\nbetter to explain the normal case first, and then describe the two\nextended cases.\n\n* As long as we're encapsulating typmod construction/extraction, let's\nalso encapsulate the checks for valid typmods.\n\n* Other places are fairly careful to declare typmod values as \"int32\",\nso I think this code should too.\n\nAttached is a proposed delta patch making those changes.\n\n(I made the docs mention that the extension cases are allowed as of v15.\nWhile useful in the short run, that will look like noise in ten years;\nso I could go either way on whether to do that.)\n\nIf you're good with these, then I think it's ready to go.\nI'll mark it RfC in the commitfest.\n\n\t\t\tregards, tom lane", "msg_date": "Fri, 23 Jul 2021 11:50:09 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: WIP: Relaxing the constraints on numeric scale" }, { "msg_contents": "On Fri, 23 Jul 2021 at 16:50, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> OK, I've now studied this more closely, and have some additional\n> nitpicks:\n>\n> * I felt the way you did the documentation was confusing. It seems\n> better to explain the normal case first, and then describe the two\n> extended cases.\n\nOK, that looks much better. Re-reading the entire section, I think\nit's much clearer now.\n\n> * As long as we're encapsulating typmod construction/extraction, let's\n> also encapsulate the checks for valid typmods.\n\nGood idea.\n\n> * Other places are fairly careful to declare typmod values as \"int32\",\n> so I think this code should too.\n\nOK, that seems sensible.\n\n> Attached is a proposed delta patch making those changes.\n>\n> (I made the docs mention that the extension cases are allowed as of v15.\n> While useful in the short run, that will look like noise in ten years;\n> so I could go either way on whether to do that.)\n\nHmm, yeah. In general,I find such things in the documentation useful\nfor quite a few years. 
I'm regularly looking to see when a particular\nfeature was added, to see if I can use it in a particular situation.\nBut eventually, it'll become irrelevant, and I don't know if anyone\nwill go around tidying these things up. I have left it in, but perhaps\nthere is a wider discussion to be had about whether we should be doing\nthat more (or less) often. FWIW, I like the way some docs include an\n\"available since\" tag (e.g,, Java's @since tag).\n\n> If you're good with these, then I think it's ready to go.\n> I'll mark it RfC in the commitfest.\n\nThanks. That all looked good, so I have pushed it.\n\nRegards,\nDean\n\n\n", "msg_date": "Mon, 26 Jul 2021 14:51:57 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": true, "msg_subject": "Re: WIP: Relaxing the constraints on numeric scale" } ]
[ { "msg_contents": "On 2021-Jun-29, Alvaro Herrera wrote:\n\n>Ah, yes it does. I can reproduce this now. I thought PQconsumeInput\n>was sufficient, but it's not: you have to have the PQgetResult in there\n>too. Looking ...\n\nI think that has an oversight with a719232\n\nreturn false shouldn't be return 0?\n\nregards,\n\nRanier Vilela\n\n\nOn 2021-Jun-29, Alvaro Herrera wrote: \n>Ah, yes it does. I can reproduce this now. I thought PQconsumeInput>was sufficient, but it's not: you have to have the PQgetResult in there>too. Looking ...I think that has an oversight with a719232return false shouldn't be return 0?regards,Ranier Vilela", "msg_date": "Tue, 29 Jun 2021 17:02:41 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Pipeline mode and PQpipelineSync()" }, { "msg_contents": "On 2021-Jun-29, Ranier Vilela wrote:\n\n> On 2021-Jun-29, Alvaro Herrera wrote:\n> \n> >Ah, yes it does. I can reproduce this now. I thought PQconsumeInput\n> >was sufficient, but it's not: you have to have the PQgetResult in there\n> >too. Looking ...\n> \n> I think that has an oversight with a719232\n> \n> return false shouldn't be return 0?\n\nHah, yeah, it should. Will fix\n\n-- \n�lvaro Herrera Valdivia, Chile\n\n\n", "msg_date": "Tue, 29 Jun 2021 16:20:00 -0400", "msg_from": "Alvaro Herrera <alvaro.herrera@2ndquadrant.com>", "msg_from_op": false, "msg_subject": "Re: Pipeline mode and PQpipelineSync()" } ]
[ { "msg_contents": "Hi, hackers!\n\nMany recently discussed features can make use of an extensible storage\nmanager API. Namely, storage level compression and encryption [1], [2], [3],\ndisk quota feature [4], SLRU storage changes [5], and any other features\nthat may want to substitute PostgreSQL storage layer with their\nimplementation (i.e. lazy_restore [6]).\n\nAttached is a proposal to change smgr API to make it extensible. The idea\nis to add a hook for plugins to get control in smgr and define custom\nstorage managers. The patch replaces smgrsw[] array and smgr_sw selector\nwith smgr() function that loads f_smgr implementation.\n\nAs before it has only one implementation - smgr_md, which is wrapped into\nsmgr_standard().\n\nTo create custom implementation, a developer needs to implement smgr API\nfunctions\n static const struct f_smgr smgr_custom =\n {\n .smgr_init = custominit,\n ...\n }\n\ncreate a hook function\n const f_smgr * smgr_custom(BackendId backend, RelFileNode rnode)\n {\n //Here we can also add some logic and chose which smgr to use based\non rnode and backend\n return &smgr_custom;\n }\n\nand finally set the hook:\n smgr_hook = smgr_custom;\n\n[1]\nhttps://www.postgresql.org/message-id/flat/11996861554042351@iva4-dd95b404a60b.qloud-c.yandex.net\n[2]\nhttps://www.postgresql.org/message-id/flat/272dd2d9.e52a.17235f2c050.Coremail.chjischj%40163.com\n[3] https://postgrespro.com/docs/enterprise/9.6/cfs\n[4]\nhttps://www.postgresql.org/message-id/flat/CAB0yre%3DRP_ho6Bq4cV23ELKxRcfhV2Yqrb1zHp0RfUPEWCnBRw%40mail.gmail.com\n[5]\nhttps://www.postgresql.org/message-id/flat/20180814213500.GA74618%4060f81dc409fc.ant.amazon.com\n[6] https://wiki.postgresql.org/wiki/PGCon_2021_Fun_With_WAL#Lazy_Restore\n\n\n-- \nBest regards,\nLubennikova Anastasia", "msg_date": "Wed, 30 Jun 2021 00:49:30 +0300", "msg_from": "Anastasia Lubennikova <lubennikovaav@gmail.com>", "msg_from_op": true, "msg_subject": "Extensible storage manager API - smgr hooks" }, { 
"msg_contents": "Anastasia Lubennikova писал 2021-06-30 00:49:\n> Hi, hackers!\n> \n> Many recently discussed features can make use of an extensible storage\n> manager API. Namely, storage level compression and encryption [1],\n> [2], [3], disk quota feature [4], SLRU storage changes [5], and any\n> other features that may want to substitute PostgreSQL storage layer\n> with their implementation (i.e. lazy_restore [6]).\n> \n> Attached is a proposal to change smgr API to make it extensible. The\n> idea is to add a hook for plugins to get control in smgr and define\n> custom storage managers. The patch replaces smgrsw[] array and smgr_sw\n> selector with smgr() function that loads f_smgr implementation.\n> \n> As before it has only one implementation - smgr_md, which is wrapped\n> into smgr_standard().\n> \n> To create custom implementation, a developer needs to implement smgr\n> API functions\n> static const struct f_smgr smgr_custom =\n> {\n> .smgr_init = custominit,\n> ...\n> }\n> \n> create a hook function\n> \n> const f_smgr * smgr_custom(BackendId backend, RelFileNode rnode)\n> {\n> //Here we can also add some logic and chose which smgr to use\n> based on rnode and backend\n> return &smgr_custom;\n> }\n> \n> and finally set the hook:\n> smgr_hook = smgr_custom;\n> \n> [1]\n> https://www.postgresql.org/message-id/flat/11996861554042351@iva4-dd95b404a60b.qloud-c.yandex.net\n> [2]\n> https://www.postgresql.org/message-id/flat/272dd2d9.e52a.17235f2c050.Coremail.chjischj%40163.com\n> [3] https://postgrespro.com/docs/enterprise/9.6/cfs\n> [4]\n> https://www.postgresql.org/message-id/flat/CAB0yre%3DRP_ho6Bq4cV23ELKxRcfhV2Yqrb1zHp0RfUPEWCnBRw%40mail.gmail.com\n> [5]\n> https://www.postgresql.org/message-id/flat/20180814213500.GA74618%4060f81dc409fc.ant.amazon.com\n> [6]\n> https://wiki.postgresql.org/wiki/PGCon_2021_Fun_With_WAL#Lazy_Restore\n> \n> --\n> \n> Best regards,\n> Lubennikova Anastasia\n\nGood day, Anastasia.\n\nI also think smgr should be extended with 
different implementations \naside of md.\nBut which way concrete implementation will be chosen for particular \nrelation?\nI believe it should be (immutable!) property of tablespace, and should \nbe passed\nto smgropen. Patch in current state doesn't show clear way to distinct \ndifferent\nimplementations per relation.\n\nI don't think patch should be that invasive. smgrsw could pointer to\narray instead of static array as it is of now, and then reln->smgr_which\nwill remain with same meaning. Yep it then will need a way to select \nspecific\nimplementation, but something like `char smgr_name[NAMEDATALEN]` field \nwith\nlinear search in (i believe) small smgrsw array should be enough.\n\nMaybe I'm missing something?\n\nregards,\nSokolov Yura.", "msg_date": "Wed, 30 Jun 2021 05:36:11 +0300", "msg_from": "Yura Sokolov <y.sokolov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: Extensible storage manager API - smgr hooks" }, { "msg_contents": "Hi,\n\nOn 2021-06-30 05:36:11 +0300, Yura Sokolov wrote:\n> Anastasia Lubennikova писал 2021-06-30 00:49:\n> > Hi, hackers!\n> > \n> > Many recently discussed features can make use of an extensible storage\n> > manager API. Namely, storage level compression and encryption [1],\n> > [2], [3], disk quota feature [4], SLRU storage changes [5], and any\n> > other features that may want to substitute PostgreSQL storage layer\n> > with their implementation (i.e. lazy_restore [6]).\n> > \n> > Attached is a proposal to change smgr API to make it extensible. The\n> > idea is to add a hook for plugins to get control in smgr and define\n> > custom storage managers. 
The patch replaces smgrsw[] array and smgr_sw\n> > selector with smgr() function that loads f_smgr implementation.\n> > \n> > As before it has only one implementation - smgr_md, which is wrapped\n> > into smgr_standard().\n> > \n> > To create custom implementation, a developer needs to implement smgr\n> > API functions\n> > static const struct f_smgr smgr_custom =\n> > {\n> > .smgr_init = custominit,\n> > ...\n> > }\n> > \n> > create a hook function\n> > \n> > const f_smgr * smgr_custom(BackendId backend, RelFileNode rnode)\n> > {\n> > //Here we can also add some logic and chose which smgr to use\n> > based on rnode and backend\n> > return &smgr_custom;\n> > }\n> > \n> > and finally set the hook:\n> > smgr_hook = smgr_custom;\n> > \n> > [1]\n> > https://www.postgresql.org/message-id/flat/11996861554042351@iva4-dd95b404a60b.qloud-c.yandex.net\n> > [2]\n> > https://www.postgresql.org/message-id/flat/272dd2d9.e52a.17235f2c050.Coremail.chjischj%40163.com\n> > [3] https://postgrespro.com/docs/enterprise/9.6/cfs\n> > [4]\n> > https://www.postgresql.org/message-id/flat/CAB0yre%3DRP_ho6Bq4cV23ELKxRcfhV2Yqrb1zHp0RfUPEWCnBRw%40mail.gmail.com\n> > [5]\n> > https://www.postgresql.org/message-id/flat/20180814213500.GA74618%4060f81dc409fc.ant.amazon.com\n> > [6]\n> > https://wiki.postgresql.org/wiki/PGCon_2021_Fun_With_WAL#Lazy_Restore\n> > \n> > --\n> > \n> > Best regards,\n> > Lubennikova Anastasia\n> \n> Good day, Anastasia.\n> \n> I also think smgr should be extended with different implementations aside of\n> md.\n> But which way concrete implementation will be chosen for particular\n> relation?\n> I believe it should be (immutable!) property of tablespace, and should be\n> passed\n> to smgropen. Patch in current state doesn't show clear way to distinct\n> different\n> implementations per relation.\n> \n> I don't think patch should be that invasive. 
smgrsw could pointer to\n> array instead of static array as it is of now, and then reln->smgr_which\n> will remain with same meaning. Yep it then will need a way to select\n> specific\n> implementation, but something like `char smgr_name[NAMEDATALEN]` field with\n> linear search in (i believe) small smgrsw array should be enough.\n> \n> Maybe I'm missing something?\n\nThere has been no activity on this thread for > 6 months. Therefore I'm\nmarking it as returned with feedback. Anastasia, if you want to work on this,\nplease do, but there's obviously no way it can be merged into 15...\n\nGreetings,\n\nAndres\n\n\n", "msg_date": "Mon, 21 Mar 2022 17:20:38 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: Extensible storage manager API - smgr hooks" }, { "msg_contents": "Hello Yura and Anastasia.\n\nI have tried to implement per-relation SMGR approach, and faced with a\nserious problem with redo.\n\nSo, to implement per-relation SMGR feature i have tried to do things\nsimilar to custom table AM apporach: that is, we can define our custom SMGR\nin an extention (which defines smgr handle) and then use this SMGR in\nrelation definition. 
like this:\n\n```postgres=# create extension proxy_smgr ;\nCREATE EXTENSION\npostgres=# select * from pg_smgr ;\n oid | smgrname | smgrhandler\n-------+------------+--------------------\n 4646 | md | smgr_md_handler\n 16386 | proxy_smgr | proxy_smgr_handler\n(2 rows)\n\npostgres=# create table tt(i int) storage manager proxy_smgr_handler;\nERROR: storage manager \"proxy_smgr_handler\" does not exist\npostgres=# create table tt(i int) storage manager proxy_smgr;\nINFO: proxy open 1663 5 16391\nINFO: proxy create 16391\nINFO: proxy close, 16391\nINFO: proxy close, 16391\nINFO: proxy close, 16391\nINFO: proxy close, 16391\nCREATE TABLE\npostgres=# select * from tt;\nINFO: proxy open 1663 5 16391\nINFO: proxy nblocks 16391\nINFO: proxy nblocks 16391\n i\n---\n(0 rows)\n\npostgres=# insert into tt values(1);\nINFO: proxy exists 16391\nINFO: proxy nblocks 16391\nINFO: proxy nblocks 16391\nINFO: proxcy extend 16391\nINSERT 0 1\npostgres=# select * from tt;\nINFO: proxy nblocks 16391\nINFO: proxy nblocks 16391\n i\n---\n 1\n(1 row)\n```\n\nextention sql files looks like this:\n\n```\nCREATE FUNCTION proxy_smgr_handler(internal)\nRETURNS table_smgr_handler\nAS 'MODULE_PATHNAME'\nLANGUAGE C;\n\n-- Storage manager\nCREATE STORAGE MANAGER proxy_smgr HANDLER proxy_smgr_handler;\n```\n\nTo do this i have defined catalog relation pg_smgr where i store smgr`s\nhandlers and use this relation when we need to open some other(non-catalog)\nrelations in smgropen function. The patch almost passes regression tests(8\nof 214 tests failed.) but it fails on first checkpoint or in crash\nrecorvery. Also, i have changed WAL format, added SMGR oid to each WAL\nrecord with RelFileNode structure. Why do we need WAL changes? well, i\ntried to solve folowing issue.\n\nAs i mentioned, there is a problem with redo, with is: we cannot do\nsyscache search to get relation`s SMGR to apply wal, because syscache is\nnot initialized during redo (crash recovery). 
As i understand, syscache is\nnot initialised because system catalogs are not consistent until crash\nrecovery is done.\n\n\nSo, thants it, I decided to write to this thread to get feedback and\nunderstand how best to solve the problem with redo.\n\nWhat do you think?\n\nOn Thu, Jun 16, 2022 at 1:38 PM Andres Freund <andres@anarazel.de> wrote:\n\n> Hi,\n>\n> On 2021-06-30 05:36:11 +0300, Yura Sokolov wrote:\n> > Anastasia Lubennikova писал 2021-06-30 00:49:\n> > > Hi, hackers!\n> > >\n> > > Many recently discussed features can make use of an extensible storage\n> > > manager API. Namely, storage level compression and encryption [1],\n> > > [2], [3], disk quota feature [4], SLRU storage changes [5], and any\n> > > other features that may want to substitute PostgreSQL storage layer\n> > > with their implementation (i.e. lazy_restore [6]).\n> > >\n> > > Attached is a proposal to change smgr API to make it extensible. The\n> > > idea is to add a hook for plugins to get control in smgr and define\n> > > custom storage managers. 
The patch replaces smgrsw[] array and smgr_sw\n> > > selector with smgr() function that loads f_smgr implementation.\n> > >\n> > > As before it has only one implementation - smgr_md, which is wrapped\n> > > into smgr_standard().\n> > >\n> > > To create custom implementation, a developer needs to implement smgr\n> > > API functions\n> > > static const struct f_smgr smgr_custom =\n> > > {\n> > > .smgr_init = custominit,\n> > > ...\n> > > }\n> > >\n> > > create a hook function\n> > >\n> > > const f_smgr * smgr_custom(BackendId backend, RelFileNode rnode)\n> > > {\n> > > //Here we can also add some logic and chose which smgr to use\n> > > based on rnode and backend\n> > > return &smgr_custom;\n> > > }\n> > >\n> > > and finally set the hook:\n> > > smgr_hook = smgr_custom;\n> > >\n> > > [1]\n> > >\n> https://www.postgresql.org/message-id/flat/11996861554042351@iva4-dd95b404a60b.qloud-c.yandex.net\n> > > [2]\n> > >\n> https://www.postgresql.org/message-id/flat/272dd2d9.e52a.17235f2c050.Coremail.chjischj%40163.com\n> > > [3] https://postgrespro.com/docs/enterprise/9.6/cfs\n> > > [4]\n> > >\n> https://www.postgresql.org/message-id/flat/CAB0yre%3DRP_ho6Bq4cV23ELKxRcfhV2Yqrb1zHp0RfUPEWCnBRw%40mail.gmail.com\n> > > [5]\n> > >\n> https://www.postgresql.org/message-id/flat/20180814213500.GA74618%4060f81dc409fc.ant.amazon.com\n> > > [6]\n> > > https://wiki.postgresql.org/wiki/PGCon_2021_Fun_With_WAL#Lazy_Restore\n> > >\n> > > --\n> > >\n> > > Best regards,\n> > > Lubennikova Anastasia\n> >\n> > Good day, Anastasia.\n> >\n> > I also think smgr should be extended with different implementations\n> aside of\n> > md.\n> > But which way concrete implementation will be chosen for particular\n> > relation?\n> > I believe it should be (immutable!) property of tablespace, and should be\n> > passed\n> > to smgropen. Patch in current state doesn't show clear way to distinct\n> > different\n> > implementations per relation.\n> >\n> > I don't think patch should be that invasive. 
smgrsw could pointer to\n> > array instead of static array as it is of now, and then reln->smgr_which\n> > will remain with same meaning. Yep it then will need a way to select\n> > specific\n> > implementation, but something like `char smgr_name[NAMEDATALEN]` field\n> with\n> > linear search in (i believe) small smgrsw array should be enough.\n> >\n> > Maybe I'm missing something?\n>\n> There has been no activity on this thread for > 6 months. Therefore I'm\n> marking it as returned with feedback. Anastasia, if you want to work on\n> this,\n> please do, but there's obviously no way it can be merged into 15...\n>\n> Greetings,\n>\n> Andres\n>\n>\n>\n>\n>", "msg_date": "Thu, 16 Jun 2022 13:41:21 +0500", "msg_from": "Kirill Reshke <reshke@double.cloud>", "msg_from_op": false, "msg_subject": "Re: Extensible storage manager API - smgr hooks" }, { "msg_contents": "\n\n> On 16 Jun 2022, at 13:41, Kirill Reshke <reshke@double.cloud> wrote:\n> \n> Hello Yura and Anastasia.\n\nFWIW this technology is now a part of Greenplum [0]. We are building GP extension that automatically offloads cold data to S3 - a very simplified version of Neon for analytical workloads.\nWhen a segment of a table is not used for a long period of time, extension will sync files with backup storage in the Cloud.\nWhen the user touches data, extension's smgr will bring table segments back from backup or latest synced version.\n\nOur #1 goal is to provide a tool useful for the community. We easily can provide same extension for Postgres if this technology (extensible smgr) is in core. Does such an extension seem useful for Postgres? Or does this data access pattern seems unusual for Postgres? 
By pattern I mean vast amounts of cold data only ever appended and never touched.\n\n\nBest regards, Andrey Borodin.\n\n[0] https://github.com/greenplum-db/gpdb/pull/13601\n\n", "msg_date": "Fri, 26 Aug 2022 10:25:58 +0500", "msg_from": "Andrey Borodin <x4mmm@yandex-team.ru>", "msg_from_op": false, "msg_subject": "Re: Extensible storage manager API - smgr hooks" } ]
[ { "msg_contents": "PITR of Abort Prepared generates the wrong log message.\n\nFix attached\n\n-- \nSimon Riggs http://www.EnterpriseDB.com/", "msg_date": "Tue, 29 Jun 2021 23:03:30 +0100", "msg_from": "Simon Riggs <simon.riggs@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Fix PITR msg for Abort Prepared" }, { "msg_contents": "On Tue, Jun 29, 2021 at 11:03:30PM +0100, Simon Riggs wrote:\n> PITR of Abort Prepared generates the wrong log message.\n\nGood catch! This is wrong since 4f1b890 and 9.5, so this needs a\nbackpatch all the way down.\n--\nMichael", "msg_date": "Wed, 30 Jun 2021 10:12:45 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Fix PITR msg for Abort Prepared" } ]
[ { "msg_contents": "Hello!\n\nTomáš Vondra has shared a few ideas to improve BRIN index in czech\nPostgreSQL mail list some time ago [1 , in czech only]. This is first\ntry to implement one of those ideas.\n\nCurrently BRIN index blocks HOT update even it is not linked tuples\ndirectly. I'm attaching the initial patch allowing HOT update even on\nBRIN indexed columns. This patch went through an initial review on\nczech PostgreSQL mail list [1].\n\nIt can be viewed online (latest version) on GitHub [2] as well.\n\n- small overview\n\n1. I have added \"amhotblocking\" flag to index AM descriptor set to\n\"true\" for all, except BRIN, index types. And later in heap_update\nmethod (heapam.c) I do filter attributes based on this new flag,\ninstead of currently checking for any existing index.\n\n2. I had to enhance the \"RelationGetIndexAttrBitmap\" function to be\nable to return a bitmap of index attribute numbers related to the new\nAM flag using \"INDEX_ATTR_BITMAP_HOT_BLOCKING\" filter.\nPS: Originally the \"INDEX_ATTR_BITMAP_ALL\" filter was used for HOT\ncheck update and most likely could be removed (including all logic\nrelated in RelationGetIndexAttrBitmap), since I have not found any\nother usage.\n\n3. I have created an initial regression test using\n\"pg_stat_get_tuples_hot_updated\" to find out HOT was successful on the\nBRIN indexed column. Unfortunately \"pg_stat_get_tuples_hot_updated\" is\nnot updated immediately and I have not found any way to enforce the\nupdate. 
Thus (at least for now) I have used a similar approach to\nstats.sql using the \"wait_for_stats\" function (waiting for 30 seconds\nand checking each 100ms for change).\n\nI'm attaching patch to CF 2021-07.\n\n[1] https://groups.google.com/g/postgresql-cz/c/oxA_v3H17Qg\n[2] https://github.com/simi/postgres/pull/7", "msg_date": "Wed, 30 Jun 2021 00:31:25 +0200", "msg_from": "=?UTF-8?B?Sm9zZWYgxaBpbcOhbmVr?= <josef.simanek@gmail.com>", "msg_from_op": true, "msg_subject": "[PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "st 30. 6. 2021 v 0:31 odesílatel Josef Šimánek <josef.simanek@gmail.com> napsal:\n>\n> Hello!\n>\n> Tomáš Vondra has shared a few ideas to improve BRIN index in czech\n> PostgreSQL mail list some time ago [1 , in czech only]. This is first\n> try to implement one of those ideas.\n>\n> Currently BRIN index blocks HOT update even it is not linked tuples\n> directly. I'm attaching the initial patch allowing HOT update even on\n> BRIN indexed columns. This patch went through an initial review on\n> czech PostgreSQL mail list [1].\n\nI just found out current patch is breaking partial-index isolation\ntest. I'm looking into this problem.\n\n> It can be viewed online (latest version) on GitHub [2] as well.\n>\n> - small overview\n>\n> 1. I have added \"amhotblocking\" flag to index AM descriptor set to\n> \"true\" for all, except BRIN, index types. And later in heap_update\n> method (heapam.c) I do filter attributes based on this new flag,\n> instead of currently checking for any existing index.\n>\n> 2. I had to enhance the \"RelationGetIndexAttrBitmap\" function to be\n> able to return a bitmap of index attribute numbers related to the new\n> AM flag using \"INDEX_ATTR_BITMAP_HOT_BLOCKING\" filter.\n> PS: Originally the \"INDEX_ATTR_BITMAP_ALL\" filter was used for HOT\n> check update and most likely could be removed (including all logic\n> related in RelationGetIndexAttrBitmap), since I have not found any\n> other usage.\n>\n> 3. 
I have created an initial regression test using\n> \"pg_stat_get_tuples_hot_updated\" to find out HOT was successful on the\n> BRIN indexed column. Unfortunately \"pg_stat_get_tuples_hot_updated\" is\n> not updated immediately and I have not found any way to enforce the\n> update. Thus (at least for now) I have used a similar approach to\n> stats.sql using the \"wait_for_stats\" function (waiting for 30 seconds\n> and checking each 100ms for change).\n>\n> I'm attaching patch to CF 2021-07.\n>\n> [1] https://groups.google.com/g/postgresql-cz/c/oxA_v3H17Qg\n> [2] https://github.com/simi/postgres/pull/7\n\n\n", "msg_date": "Wed, 30 Jun 2021 00:53:02 +0200", "msg_from": "=?UTF-8?B?Sm9zZWYgxaBpbcOhbmVr?= <josef.simanek@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "\n\nOn 6/30/21 12:53 AM, Josef Šimánek wrote:\n> st 30. 6. 2021 v 0:31 odesílatel Josef Šimánek <josef.simanek@gmail.com> napsal:\n>>\n>> Hello!\n>>\n>> Tomáš Vondra has shared a few ideas to improve BRIN index in czech\n>> PostgreSQL mail list some time ago [1 , in czech only]. This is first\n>> try to implement one of those ideas.\n>>\n>> Currently BRIN index blocks HOT update even it is not linked tuples\n>> directly. I'm attaching the initial patch allowing HOT update even on\n>> BRIN indexed columns. This patch went through an initial review on\n>> czech PostgreSQL mail list [1].\n> \n> I just found out current patch is breaking partial-index isolation\n> test. I'm looking into this problem.\n> \n\nThe problem is in RelationGetIndexAttrBitmap - the existing code first \nwalks indnatts, and builds the indexattrs / hotblockingattrs. But then \nit also inspects expressions and the predicate (by pull_varattnos), and \nthe patch fails to do that for hotblockingattrs. 
Which is why it fails \nfor partial-index, because that uses an index with a predicate.\n\nSo there needs to be something like:\n\n if (indexDesc->rd_indam->amhotblocking)\n pull_varattnos(indexExpressions, 1, &hotblockingattrs);\n\n if (indexDesc->rd_indam->amhotblocking)\n pull_varattnos(indexPredicate, 1, &hotblockingattrs);\n\nThis fixes the failure for me.\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Wed, 30 Jun 2021 01:20:01 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "st 30. 6. 2021 v 1:20 odesílatel Tomas Vondra\n<tomas.vondra@enterprisedb.com> napsal:\n>\n>\n>\n> On 6/30/21 12:53 AM, Josef Šimánek wrote:\n> > st 30. 6. 2021 v 0:31 odesílatel Josef Šimánek <josef.simanek@gmail.com> napsal:\n> >>\n> >> Hello!\n> >>\n> >> Tomáš Vondra has shared a few ideas to improve BRIN index in czech\n> >> PostgreSQL mail list some time ago [1 , in czech only]. This is first\n> >> try to implement one of those ideas.\n> >>\n> >> Currently BRIN index blocks HOT update even it is not linked tuples\n> >> directly. I'm attaching the initial patch allowing HOT update even on\n> >> BRIN indexed columns. This patch went through an initial review on\n> >> czech PostgreSQL mail list [1].\n> >\n> > I just found out current patch is breaking partial-index isolation\n> > test. I'm looking into this problem.\n> >\n>\n> The problem is in RelationGetIndexAttrBitmap - the existing code first\n> walks indnatts, and builds the indexattrs / hotblockingattrs. But then\n> it also inspects expressions and the predicate (by pull_varattnos), and\n> the patch fails to do that for hotblockingattrs. 
Which is why it fails\n> for partial-index, because that uses an index with a predicate.\n>\n> So there needs to be something like:\n>\n> if (indexDesc->rd_indam->amhotblocking)\n> pull_varattnos(indexExpressions, 1, &hotblockingattrs);\n>\n> if (indexDesc->rd_indam->amhotblocking)\n> pull_varattnos(indexPredicate, 1, &hotblockingattrs);\n>\n> This fixes the failure for me.\n\nThanks for the hint. I'm attaching a fixed standalone patch.\n\n> regards\n>\n> --\n> Tomas Vondra\n> EnterpriseDB: http://www.enterprisedb.com\n> The Enterprise PostgreSQL Company", "msg_date": "Wed, 30 Jun 2021 01:43:15 +0200", "msg_from": "=?UTF-8?B?Sm9zZWYgxaBpbcOhbmVr?= <josef.simanek@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "On 6/30/21 1:43 AM, Josef Šimánek wrote:\n> st 30. 6. 2021 v 1:20 odesílatel Tomas Vondra\n> <tomas.vondra@enterprisedb.com> napsal:\n>>\n>>\n>>\n>> On 6/30/21 12:53 AM, Josef Šimánek wrote:\n>>> st 30. 6. 2021 v 0:31 odesílatel Josef Šimánek <josef.simanek@gmail.com> napsal:\n>>>>\n>>>> Hello!\n>>>>\n>>>> Tomáš Vondra has shared a few ideas to improve BRIN index in czech\n>>>> PostgreSQL mail list some time ago [1 , in czech only]. This is first\n>>>> try to implement one of those ideas.\n>>>>\n>>>> Currently BRIN index blocks HOT update even it is not linked tuples\n>>>> directly. I'm attaching the initial patch allowing HOT update even on\n>>>> BRIN indexed columns. This patch went through an initial review on\n>>>> czech PostgreSQL mail list [1].\n>>>\n>>> I just found out current patch is breaking partial-index isolation\n>>> test. I'm looking into this problem.\n>>>\n>>\n>> The problem is in RelationGetIndexAttrBitmap - the existing code first\n>> walks indnatts, and builds the indexattrs / hotblockingattrs. But then\n>> it also inspects expressions and the predicate (by pull_varattnos), and\n>> the patch fails to do that for hotblockingattrs. 
Which is why it fails\n>> for partial-index, because that uses an index with a predicate.\n>>\n>> So there needs to be something like:\n>>\n>> if (indexDesc->rd_indam->amhotblocking)\n>> pull_varattnos(indexExpressions, 1, &hotblockingattrs);\n>>\n>> if (indexDesc->rd_indam->amhotblocking)\n>> pull_varattnos(indexPredicate, 1, &hotblockingattrs);\n>>\n>> This fixes the failure for me.\n> \n> Thanks for the hint. I'm attaching a fixed standalone patch.\n> \n\nThanks, this version seems to be working fine and passes check-world. So\nI did another round of review, and all I have are some simple comments:\n\n\n1) naming stuff (this is very subjective, feel free to disagree)\n\nI wonder if we should rename 'amhotblocking' to 'amblockshot' which\nseems more natural to me?\n\nSimilarly, maybe rename rd_hotblockingattr to rd_hotattr\n\n\n2) Do we actually need to calculate and store hotblockingattrs\nseparately in RelationGetIndexAttrBitmap? It seems to me it's either\nNULL (with amhotblocking=false) or equal to indexattrs. So why not to\njust get rid of hotblockingattr and rd_hotblockingattr, and do something\nlike\n\n case INDEX_ATTR_BITMAP_HOT_BLOCKING:\n return (amhotblocking) ? bms_copy(rel->rd_hotblockingattr) : NULL;\n\nI haven't tried, so maybe I'm missing something?\n\n\n3) The patch should update indexam.sgml with description of the new\nfield, amhotblocking or how it'll end up named.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 12 Jul 2021 22:31:16 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "On 2021-Jul-12, Tomas Vondra wrote:\n\n> 2) Do we actually need to calculate and store hotblockingattrs\n> separately in RelationGetIndexAttrBitmap? It seems to me it's either\n> NULL (with amhotblocking=false) or equal to indexattrs. 
So why not to\n> just get rid of hotblockingattr and rd_hotblockingattr, and do something\n> like\n> \n> case INDEX_ATTR_BITMAP_HOT_BLOCKING:\n> return (amhotblocking) ? bms_copy(rel->rd_hotblockingattr) : NULL;\n> \n> I haven't tried, so maybe I'm missing something?\n\n... What? I thought the whole point is that BRIN indexes do not cause\nthe columns to become part of this set, while all other index types do.\nIf you make them both the same, then there's no point.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"La libertad es como el dinero; el que no la sabe emplear la pierde\" (Alvarez)\n\n\n", "msg_date": "Mon, 12 Jul 2021 16:37:40 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "\n\nOn 7/12/21 10:37 PM, Alvaro Herrera wrote:\n> On 2021-Jul-12, Tomas Vondra wrote:\n> \n>> 2) Do we actually need to calculate and store hotblockingattrs\n>> separately in RelationGetIndexAttrBitmap? It seems to me it's either\n>> NULL (with amhotblocking=false) or equal to indexattrs. So why not to\n>> just get rid of hotblockingattr and rd_hotblockingattr, and do something\n>> like\n>>\n>> case INDEX_ATTR_BITMAP_HOT_BLOCKING:\n>> return (amhotblocking) ? bms_copy(rel->rd_hotblockingattr) : NULL;\n>>\n>> I haven't tried, so maybe I'm missing something?\n> \n> ... What? I thought the whole point is that BRIN indexes do not cause\n> the columns to become part of this set, while all other index types do.\n> If you make them both the same, then there's no point.\n> \n\nWell, one of us is confused and it might be me ;-)\n\nThe point is that BRIN is the only index type with amhotblocking=false,\nso it would return NULL (and thus it does not block HOT). 
All other\nindexes AMs have amblocking=true and so should return rd_indexattr (I\nforgot to change that in the code chunk).\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 12 Jul 2021 22:45:24 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "po 12. 7. 2021 v 22:31 odesílatel Tomas Vondra\n<tomas.vondra@enterprisedb.com> napsal:\n>\n> On 6/30/21 1:43 AM, Josef Šimánek wrote:\n> > st 30. 6. 2021 v 1:20 odesílatel Tomas Vondra\n> > <tomas.vondra@enterprisedb.com> napsal:\n> >>\n> >>\n> >>\n> >> On 6/30/21 12:53 AM, Josef Šimánek wrote:\n> >>> st 30. 6. 2021 v 0:31 odesílatel Josef Šimánek <josef.simanek@gmail.com> napsal:\n> >>>>\n> >>>> Hello!\n> >>>>\n> >>>> Tomáš Vondra has shared a few ideas to improve BRIN index in czech\n> >>>> PostgreSQL mail list some time ago [1 , in czech only]. This is first\n> >>>> try to implement one of those ideas.\n> >>>>\n> >>>> Currently BRIN index blocks HOT update even it is not linked tuples\n> >>>> directly. I'm attaching the initial patch allowing HOT update even on\n> >>>> BRIN indexed columns. This patch went through an initial review on\n> >>>> czech PostgreSQL mail list [1].\n> >>>\n> >>> I just found out current patch is breaking partial-index isolation\n> >>> test. I'm looking into this problem.\n> >>>\n> >>\n> >> The problem is in RelationGetIndexAttrBitmap - the existing code first\n> >> walks indnatts, and builds the indexattrs / hotblockingattrs. But then\n> >> it also inspects expressions and the predicate (by pull_varattnos), and\n> >> the patch fails to do that for hotblockingattrs. 
Which is why it fails\n> >> for partial-index, because that uses an index with a predicate.\n> >>\n> >> So there needs to be something like:\n> >>\n> >> if (indexDesc->rd_indam->amhotblocking)\n> >> pull_varattnos(indexExpressions, 1, &hotblockingattrs);\n> >>\n> >> if (indexDesc->rd_indam->amhotblocking)\n> >> pull_varattnos(indexPredicate, 1, &hotblockingattrs);\n> >>\n> >> This fixes the failure for me.\n> >\n> > Thanks for the hint. I'm attaching a fixed standalone patch.\n> >\n>\n> Thanks, this version seems to be working fine and passes check-world. So\n> I did another round of review, and all I have are some simple comments:\n>\n>\n> 1) naming stuff (this is very subjective, feel free to disagree)\n>\n> I wonder if we should rename 'amhotblocking' to 'amblockshot' which\n> seems more natural to me?\n>\n> Similarly, maybe rename rd_hotblockingattr to rd_hotattr\n\nOK, I wasn't sure about the naming.\n\n>\n> 2) Do we actually need to calculate and store hotblockingattrs\n> separately in RelationGetIndexAttrBitmap? It seems to me it's either\n> NULL (with amhotblocking=false) or equal to indexattrs. So why not to\n> just get rid of hotblockingattr and rd_hotblockingattr, and do something\n> like\n>\n> case INDEX_ATTR_BITMAP_HOT_BLOCKING:\n> return (amhotblocking) ? bms_copy(rel->rd_hotblockingattr) : NULL;\n>\n> I haven't tried, so maybe I'm missing something?\n\nrelation->rd_indexattr is currently not used (at least I have not\nfound anything) for anything, except looking if other values are\nalready loaded.\n\n/* Quick exit if we already computed the result. 
*/\nif (relation->rd_indexattr != NULL)\n\nI think it could be replaced with boolean to make it clear other\nvalues (rd_keyattr, rd_pkattr, rd_idattr, rd_hotblockingattr) are\nalready loaded.\n>\n> 3) The patch should update indexam.sgml with description of the new\n> field, amhotblocking or how it'll end up named.\n\nI'll do.\n\n>\n> regards\n>\n> --\n> Tomas Vondra\n> EnterpriseDB: http://www.enterprisedb.com\n> The Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 12 Jul 2021 22:45:30 +0200", "msg_from": "=?UTF-8?B?Sm9zZWYgxaBpbcOhbmVr?= <josef.simanek@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "\n\nOn 7/12/21 10:45 PM, Josef Šimánek wrote:\n> po 12. 7. 2021 v 22:31 odesílatel Tomas Vondra\n> <tomas.vondra@enterprisedb.com> napsal:\n>>\n>> On 6/30/21 1:43 AM, Josef Šimánek wrote:\n>>> st 30. 6. 2021 v 1:20 odesílatel Tomas Vondra\n>>> <tomas.vondra@enterprisedb.com> napsal:\n>>>>\n>>>>\n>>>>\n>>>> On 6/30/21 12:53 AM, Josef Šimánek wrote:\n>>>>> st 30. 6. 2021 v 0:31 odesílatel Josef Šimánek <josef.simanek@gmail.com> napsal:\n>>>>>>\n>>>>>> Hello!\n>>>>>>\n>>>>>> Tomáš Vondra has shared a few ideas to improve BRIN index in czech\n>>>>>> PostgreSQL mail list some time ago [1 , in czech only]. This is first\n>>>>>> try to implement one of those ideas.\n>>>>>>\n>>>>>> Currently BRIN index blocks HOT update even it is not linked tuples\n>>>>>> directly. I'm attaching the initial patch allowing HOT update even on\n>>>>>> BRIN indexed columns. This patch went through an initial review on\n>>>>>> czech PostgreSQL mail list [1].\n>>>>>\n>>>>> I just found out current patch is breaking partial-index isolation\n>>>>> test. I'm looking into this problem.\n>>>>>\n>>>>\n>>>> The problem is in RelationGetIndexAttrBitmap - the existing code first\n>>>> walks indnatts, and builds the indexattrs / hotblockingattrs. 
But then\n>>>> it also inspects expressions and the predicate (by pull_varattnos), and\n>>>> the patch fails to do that for hotblockingattrs. Which is why it fails\n>>>> for partial-index, because that uses an index with a predicate.\n>>>>\n>>>> So there needs to be something like:\n>>>>\n>>>> if (indexDesc->rd_indam->amhotblocking)\n>>>> pull_varattnos(indexExpressions, 1, &hotblockingattrs);\n>>>>\n>>>> if (indexDesc->rd_indam->amhotblocking)\n>>>> pull_varattnos(indexPredicate, 1, &hotblockingattrs);\n>>>>\n>>>> This fixes the failure for me.\n>>>\n>>> Thanks for the hint. I'm attaching a fixed standalone patch.\n>>>\n>>\n>> Thanks, this version seems to be working fine and passes check-world. So\n>> I did another round of review, and all I have are some simple comments:\n>>\n>>\n>> 1) naming stuff (this is very subjective, feel free to disagree)\n>>\n>> I wonder if we should rename 'amhotblocking' to 'amblockshot' which\n>> seems more natural to me?\n>>\n>> Similarly, maybe rename rd_hotblockingattr to rd_hotattr\n> \n> OK, I wasn't sure about the naming.\n> \n\nTBH I'm not sure either.\n\n>>\n>> 2) Do we actually need to calculate and store hotblockingattrs\n>> separately in RelationGetIndexAttrBitmap? It seems to me it's either\n>> NULL (with amhotblocking=false) or equal to indexattrs. So why not to\n>> just get rid of hotblockingattr and rd_hotblockingattr, and do something\n>> like\n>>\n>> case INDEX_ATTR_BITMAP_HOT_BLOCKING:\n>> return (amhotblocking) ? bms_copy(rel->rd_hotblockingattr) : NULL;\n>>\n>> I haven't tried, so maybe I'm missing something?\n> \n> relation->rd_indexattr is currently not used (at least I have not\n> found anything) for anything, except looking if other values are\n> already loaded.\n> \n> /* Quick exit if we already computed the result. 
*/\n> if (relation->rd_indexattr != NULL)\n> \n> I think it could be replaced with boolean to make it clear other\n> values (rd_keyattr, rd_pkattr, rd_idattr, rd_hotblockingattr) are\n> already loaded.\n\nWell, RelationGetIndexAttrBitmap is accessible from extensions, so it\nmight be used by code passing INDEX_ATTR_BITMAP_ALL. Not sure if there's\nsuch code, of course.\n\nMy point is that for amhotblocking=true the bitmaps seem to be exactly\nthe same, so we can calculate it just once (so replacing it with a bool\nflag would not save us anything).\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 12 Jul 2021 22:54:54 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "On 2021-Jul-12, Tomas Vondra wrote:\n\n> Well, one of us is confused and it might be me ;-)\n\n:-)\n\n> The point is that BRIN is the only index type with amhotblocking=false,\n> so it would return NULL (and thus it does not block HOT). All other\n> indexes AMs have amblocking=true and so should return rd_indexattr (I\n> forgot to change that in the code chunk).\n\nBut RelationGetIndexAttrBitmap is called for the table that contains the\nindex (and probably contains some other indexes too), not for one\nspecific index. So the bitmap is about the columns involved in *all*\nindexes of the table ...\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"El destino baraja y nosotros jugamos\" (A. 
Schopenhauer)\n\n\n", "msg_date": "Mon, 12 Jul 2021 16:55:31 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "On 7/12/21 10:55 PM, Alvaro Herrera wrote:\n> On 2021-Jul-12, Tomas Vondra wrote:\n> \n>> Well, one of us is confused and it might be me ;-)\n> \n> :-)\n> \n>> The point is that BRIN is the only index type with amhotblocking=false,\n>> so it would return NULL (and thus it does not block HOT). All other\n>> indexes AMs have amblocking=true and so should return rd_indexattr (I\n>> forgot to change that in the code chunk).\n> \n> But RelationGetIndexAttrBitmap is called for the table that contains the\n> index (and probably contains some other indexes too), not for one\n> specific index. So the bitmap is about the columns involved in *all*\n> indexes of the table ...\n> \n\nD'oh! Well, I did say I might be confused ...\n\nYeah, that optimization is not possible, unfortunately.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 12 Jul 2021 23:00:05 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "On 2021-Jul-12, Josef Šimánek wrote:\n\n> > 2) Do we actually need to calculate and store hotblockingattrs\n> > separately in RelationGetIndexAttrBitmap? It seems to me it's either\n> > NULL (with amhotblocking=false) or equal to indexattrs. So why not to\n> > just get rid of hotblockingattr and rd_hotblockingattr, and do something\n> > like\n> >\n> > case INDEX_ATTR_BITMAP_HOT_BLOCKING:\n> > return (amhotblocking) ? 
bms_copy(rel->rd_hotblockingattr) : NULL;\n> >\n> > I haven't tried, so maybe I'm missing something?\n> \n> relation->rd_indexattr is currently not used (at least I have not\n> found anything) for anything, except looking if other values are\n> already loaded.\n\nOh, that's interesting. What this means is that INDEX_ATTR_BITMAP_ALL\nis no longer used; its uses must have all been replaced by something\nelse. It seems the only one that currently exists is for HOT in\nheap_update, which this patch replaces with the new one. In a quick\nsearch, no external code depends on it, so I'd be inclined to just\nremove it ...\n\nI think a boolean is much simpler. Consider a table with 1600 columns :-)\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Puedes vivir sólo una vez, pero si lo haces bien, una vez es suficiente\"\n\n\n", "msg_date": "Mon, 12 Jul 2021 17:02:09 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "\n\nOn 7/12/21 11:02 PM, Alvaro Herrera wrote:\n> On 2021-Jul-12, Josef Šimánek wrote:\n> \n>>> 2) Do we actually need to calculate and store hotblockingattrs\n>>> separately in RelationGetIndexAttrBitmap? It seems to me it's either\n>>> NULL (with amhotblocking=false) or equal to indexattrs. So why not to\n>>> just get rid of hotblockingattr and rd_hotblockingattr, and do something\n>>> like\n>>>\n>>> case INDEX_ATTR_BITMAP_HOT_BLOCKING:\n>>> return (amhotblocking) ? bms_copy(rel->rd_hotblockingattr) : NULL;\n>>>\n>>> I haven't tried, so maybe I'm missing something?\n>>\n>> relation->rd_indexattr is currently not used (at least I have not\n>> found anything) for anything, except looking if other values are\n>> already loaded.\n> \n> Oh, that's interesting. What this means is that INDEX_ATTR_BITMAP_ALL\n> is no longer used; its uses must have all been replaced by something\n> else. 
It seems the only one that currently exists is for HOT in\n> heap_update, which this patch replaces with the new one. In a quick\n> search, no external code depends on it, so I'd be inclined to just\n> remove it ...\n> \n> I think a boolean is much simpler. Consider a table with 1600 columns :-)\n> \n\nI'm not sure how to verify no external code depends on that flag. I have\nno idea if there's a plausible use case for it, though.\n\nEven with 1600 columns the amount of wasted memory is only about 200B,\nwhich is not that bad I think. Not great, not terrible.\n\nOTOH most tables won't have any BRIN indexes, in which case indexattr\nand hotblockingattr are guaranteed to be exactly the same. So maybe\nthat's something we could leverage - we need to calculate the \"hot\"\nbitmap, and in most cases we can use it for indexattr too.\n\nMaybe let's leave that for a separate patch, though?\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 12 Jul 2021 23:15:04 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "On 2021-Jul-12, Tomas Vondra wrote:\n\n> I'm not sure how to verify no external code depends on that flag. I have\n> no idea if there's a plausible use case for it, though.\n\nBut we don't *have* to, do we?\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"Tiene valor aquel que admite que es un cobarde\" (Fernandel)\n\n\n", "msg_date": "Mon, 12 Jul 2021 19:00:37 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "po 12. 7. 
2021 v 23:15 odesílatel Tomas Vondra\n<tomas.vondra@enterprisedb.com> napsal:\n>\n>\n>\n> On 7/12/21 11:02 PM, Alvaro Herrera wrote:\n> > On 2021-Jul-12, Josef Šimánek wrote:\n> >\n> >>> 2) Do we actually need to calculate and store hotblockingattrs\n> >>> separately in RelationGetIndexAttrBitmap? It seems to me it's either\n> >>> NULL (with amhotblocking=false) or equal to indexattrs. So why not to\n> >>> just get rid of hotblockingattr and rd_hotblockingattr, and do something\n> >>> like\n> >>>\n> >>> case INDEX_ATTR_BITMAP_HOT_BLOCKING:\n> >>> return (amhotblocking) ? bms_copy(rel->rd_hotblockingattr) : NULL;\n> >>>\n> >>> I haven't tried, so maybe I'm missing something?\n> >>\n> >> relation->rd_indexattr is currently not used (at least I have not\n> >> found anything) for anything, except looking if other values are\n> >> already loaded.\n> >\n> > Oh, that's interesting. What this means is that INDEX_ATTR_BITMAP_ALL\n> > is no longer used; its uses must have all been replaced by something\n> > else. It seems the only one that currently exists is for HOT in\n> > heap_update, which this patch replaces with the new one. In a quick\n> > search, no external code depends on it, so I'd be inclined to just\n> > remove it ...\n> >\n> > I think a boolean is much simpler. Consider a table with 1600 columns :-)\n> >\n>\n> I'm not sure how to verify no external code depends on that flag. I have\n> no idea if there's a plausible use case for it, though.\n\nI tried GitHub search before to ensure at least it is not a widely\nused \"API\". There were no results outside of PostgreSQL code itself in\nfirst 10 pages of results.\n\n\n> Even with 1600 columns the amount of wasted memory is only about 200B,\n> which is not that bad I think. Not great, not terrible.\n>\n> OTOH most tables won't have any BRIN indexes, in which case indexattr\n> and hotblockingattr are guaranteed to be exactly the same. 
So maybe\n> that's something we could leverage - we need to calculate the \"hot\"\n> bitmap, and in most cases we can use it for indexattr too.\n>\n> Maybe let's leave that for a separate patch, though?\n>\n>\n> regards\n>\n> --\n> Tomas Vondra\n> EnterpriseDB: http://www.enterprisedb.com\n> The Enterprise PostgreSQL Company\n\n\n", "msg_date": "Tue, 13 Jul 2021 01:02:19 +0200", "msg_from": "=?UTF-8?B?Sm9zZWYgxaBpbcOhbmVr?= <josef.simanek@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "Hi,\n\nI took a look at this patch again to see if I can get it polished and \nfixed. Per the discussion, I've removed the rd_indexattr list and \nreplaced it with a simple flag. While doing so, I noticed a couple of \nplaces that should have consider (init or free) rd_hotblockingattr.\n\nPatch 0001 is the v2, 0002 removes the rd_indexattr etc.\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Mon, 4 Oct 2021 16:17:47 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "po 4. 10. 2021 v 16:17 odesílatel Tomas Vondra\n<tomas.vondra@enterprisedb.com> napsal:\n>\n> Hi,\n>\n> I took a look at this patch again to see if I can get it polished and\n> fixed. Per the discussion, I've removed the rd_indexattr list and\n> replaced it with a simple flag. While doing so, I noticed a couple of\n> places that should have consider (init or free) rd_hotblockingattr.\n\nThanks for finishing this. I can confirm both patches do apply without\nproblems. 
I did some simple testing locally and everything worked as\nintended.\n\n> Patch 0001 is the v2, 0002 removes the rd_indexattr etc.\n>\n> regards\n>\n> --\n> Tomas Vondra\n> EnterpriseDB: http://www.enterprisedb.com\n> The Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sun, 10 Oct 2021 23:36:08 +0200", "msg_from": "=?UTF-8?B?Sm9zZWYgxaBpbcOhbmVr?= <josef.simanek@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "Hi,\n\nI've polished the patch a bit, with the goal to get it committed. I've \nadded the new amhotblocking flag to indexam.sgml (and I'm wondering if \nthere's another place in docs for more details).\n\nBut then I realized there's an issue in handling the predicate. Consider \nthis example:\n\n drop table if exists t;\n create table t (a int, b int);\n insert into t values (1, 100);\n create index on t using brin (b) where a = 2;\n\n update t set a = 2;\n\n explain analyze select * from t where a = 2 and b = 100;\n set enable_seqscan = off;\n explain analyze select * from t where a = 2 and b = 100;\n\nWith the previous version of the patch, the explains are this:\n\n QUERY PLAN\n ----------------------------------------------------------------------\n Seq Scan on t (cost=0.00..1.01 rows=1 width=8)\n (actual time=0.006..0.007 rows=1 loops=1)\n Filter: ((a = 2) AND (b = 100))\n Planning Time: 0.040 ms\n Execution Time: 0.018 ms\n (4 rows)\n\n QUERY PLAN\n ----------------------------------------------------------------------\n Bitmap Heap Scan on t (cost=12.03..16.05 rows=1 width=8)\n (actual time=0.007..0.009 rows=0 loops=1)\n Recheck Cond: ((b = 100) AND (a = 2))\n -> Bitmap Index Scan on t_b_idx (cost=0.00..12.03 rows=1 width=0)\n (actual time=0.006..0.006 rows=0 loops=1)\n Index Cond: (b = 100)\n Planning Time: 0.041 ms\n Execution Time: 0.026 ms\n (6 rows)\n\nNotice that the second plan (using the brin index) produces 0 rows, \nwhich is obviously wrong. 
Clearly, the index was not updated.\n\nI think this is caused by simple thinko in RelationGetIndexAttrBitmap, \nwhich did this:\n\n /* Collect all attributes in the index predicate, too */\n if (indexDesc->rd_indam->amhotblocking)\n pull_varattnos(indexPredicate, 1, &hotblockingattrs);\n\nI think this is wrong - we should not ignore the predicate based on \namhotblocking, because then we'll fail to notice an update making the \ntuple match the index predicate (as in the example).\n\nThe way I understand heap_update() it does not try to determine if the \nupdate makes the tuple indexable, it just disables HOT when it might \nhappen. The attached patch just calls pull_varattnos every time.\n\nI wonder if we might be a bit smarter about the predicates vs. HOT, and \ndisable HOT only when the tuple becomes indexable after the update.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Sat, 6 Nov 2021 03:16:02 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "OK,\n\nI've polished the last version of the patch a bit (added a regression \ntest with update of attribute in index predicate and docs about the new \nflag into indexam.sgml) and pushed.\n\nI wonder if we could/should improve handling of index predicates. In \nparticular, it seems to me we could simply ignore indexes when the new \nrow does not match the index predicate. For example, if there's an index\n\n CREATE INDEX ON t (a) WHERE b = 1;\n\nand the update does:\n\n UPDATE t SET b = 2 WHERE ...;\n\nthen we'll not add the tuple pointer to this index anyway, and we could \nsimply ignore this index when considering HOT. 
But I might be missing \nsomething important about HOT ...\n\nThe main problem I see with this is it requires evaluating the index \npredicate for each tuple, which makes it incompatible with the caching \nin RelationGetIndexAttrBitmap. Just ditching the caching seems like a \nbad idea, so we'd probably have to do this in two phases:\n\n1) Do what we do now, i.e. RelationGetIndexAttrBitmap considering all \nindexes / attributes. If this says HOT is possible, great - we're done.\n\n2) If (1) says HOT is not possible, we need to look whether it's because \nof regular or partial index. For regular indexes it's clear, for partial \nindexes we could ignore this if the predicate evaluates to false for the \nnew row.\n\nBut even if such optimization is possible, it's way out of scope of this \npatch and it's not clear to me it's actually a sensible trade-off.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Tue, 30 Nov 2021 20:11:03 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "út 30. 11. 2021 v 20:11 odesílatel Tomas Vondra\n<tomas.vondra@enterprisedb.com> napsal:\n>\n> OK,\n>\n> I've polished the last version of the patch a bit (added a regression\n> test with update of attribute in index predicate and docs about the new\n> flag into indexam.sgml) and pushed.\n\nThanks a lot for taking over this, improving and pushing!\n\n> I wonder if we could/should improve handling of index predicates. In\n> particular, it seems to me we could simply ignore indexes when the new\n> row does not match the index predicate. 
For example, if there's an index\n>\n> CREATE INDEX ON t (a) WHERE b = 1;\n>\n> and the update does:\n>\n> UPDATE t SET b = 2 WHERE ...;\n>\n> then we'll not add the tuple pointer to this index anyway, and we could\n> simply ignore this index when considering HOT. But I might be missing\n> something important about HOT ...\n>\n> The main problem I see with this is it requires evaluating the index\n> predicate for each tuple, which makes it incompatible with the caching\n> in RelationGetIndexAttrBitmap. Just ditching the caching seems like a\n> bad idea, so we'd probably have to do this in two phases:\n>\n> 1) Do what we do now, i.e. RelationGetIndexAttrBitmap considering all\n> indexes / attributes. If this says HOT is possible, great - we're done.\n>\n> 2) If (1) says HOT is not possible, we need to look whether it's because\n> of regular or partial index. For regular indexes it's clear, for partial\n> indexes we could ignore this if the predicate evaluates to false for the\n> new row.\n>\n> But even if such optimization is possible, it's way out of scope of this\n> patch and it's not clear to me it's actually a sensible trade-off.\n>\n>\n> regards\n>\n> --\n> Tomas Vondra\n> EnterpriseDB: http://www.enterprisedb.com\n> The Enterprise PostgreSQL Company\n\n\n", "msg_date": "Wed, 1 Dec 2021 11:31:15 +0100", "msg_from": "=?UTF-8?B?Sm9zZWYgxaBpbcOhbmVr?= <josef.simanek@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "On Tue, Nov 30, 2021 at 08:11:03PM +0100, Tomas Vondra wrote:\n> OK,\n> \n> I've polished the last version of the patch a bit (added a regression test\n> with update of attribute in index predicate and docs about the new flag into\n> indexam.sgml) and pushed.\n\nbrin.sql's new brin_hot test is failing sometimes.\n\nI saw a local failure and then found this.\nhttps://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=serinus&dt=2021-12-01%2003%3A00%3A07\n\n SELECT 
pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid);\n pg_stat_get_tuples_hot_updated \n --------------------------------\n- 1\n+ 0\n (1 row)\n\nEvidently because:\n| 2021-12-01 04:02:01.096 CET [61a6e587.3106b1:4] LOG: wait_for_hot_stats delayed 33.217301 seconds\n\nIt seems like maybe the UDP packet lost to the stats collector got lost ?\nIt fails less than 10% of the time here, probably depending on load.\n\nBTW there's a typo in brin.sql: precicates\n\n-- \nJustin\n\n\n", "msg_date": "Sun, 5 Dec 2021 10:47:31 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "Justin Pryzby <pryzby@telsasoft.com> writes:\n> brin.sql's new brin_hot test is failing sometimes.\n> Evidently because:\n> | 2021-12-01 04:02:01.096 CET [61a6e587.3106b1:4] LOG: wait_for_hot_stats delayed 33.217301 seconds\n> It seems like maybe the UDP packet lost to the stats collector got lost ?\n> It fails less than 10% of the time here, probably depending on load.\n\nOh, geez. *Please* let us not add another regression failure mode\nlike the ones that afflict stats.sql. We do not need a doubling\nof that failure rate. I suggest just removing this test.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 05 Dec 2021 15:16:02 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "On 12/5/21 21:16, Tom Lane wrote:\n> Justin Pryzby <pryzby@telsasoft.com> writes:\n>> brin.sql's new brin_hot test is failing sometimes.\n>> Evidently because:\n>> | 2021-12-01 04:02:01.096 CET [61a6e587.3106b1:4] LOG: wait_for_hot_stats delayed 33.217301 seconds\n>> It seems like maybe the UDP packet lost to the stats collector got lost ?\n>> It fails less than 10% of the time here, probably depending on load.\n> \n> Oh, geez. 
*Please* let us not add another regression failure mode\n> like the ones that afflict stats.sql. We do not need a doubling\n> of that failure rate. I suggest just removing this test.\n> \n\nWhooops. Agreed, I'll get rid of that test.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 6 Dec 2021 01:51:47 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "Tomas Vondra <tomas.vondra@enterprisedb.com> writes:\n> On 12/5/21 21:16, Tom Lane wrote:\n>> Oh, geez. *Please* let us not add another regression failure mode\n>> like the ones that afflict stats.sql. We do not need a doubling\n>> of that failure rate. I suggest just removing this test.\n\n> Whooops. Agreed, I'll get rid of that test.\n\nAnother idea, perhaps, is to shove that test into stats.sql,\nwhere people would know to ignore it? (Actually, I've thought\nmore than once that we should mark stats.sql as ignorable\nin the schedule ...)\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 05 Dec 2021 20:47:30 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" }, { "msg_contents": "On 12/6/21 02:47, Tom Lane wrote:\n> Tomas Vondra <tomas.vondra@enterprisedb.com> writes:\n>> On 12/5/21 21:16, Tom Lane wrote:\n>>> Oh, geez. *Please* let us not add another regression failure mode\n>>> like the ones that afflict stats.sql. We do not need a doubling\n>>> of that failure rate. I suggest just removing this test.\n> \n>> Whooops. Agreed, I'll get rid of that test.\n> \n> Another idea, perhaps, is to shove that test into stats.sql,\n> where people would know to ignore it? (Actually, I've thought\n> more than once that we should mark stats.sql as ignorable\n> in the schedule ...)\n> \n\nYep. 
I've moved the test to stats.sql - that seems better than just \nditching it, because we're experimenting with maybe relaxing the HOT \nrules for BRIN a bit further and not having tests for that would be \nunfortunate.\n\nI haven't marked the test as ignorable. I wonder if we should make that \ncustomizable, so that some animals (like serinus, which fails because of \nstats.sql from time to time) could run ignore it. But if it fails \nelsewhere it would still be considered a proper failure.\n\nregards\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 11 Dec 2021 05:44:22 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Don't block HOT update by BRIN index" } ]
[ { "msg_contents": "Hi all,\n\njsonapi.c includes the following code bits to enforce the use of\nlogging:\n#ifdef FRONTEND\n#define check_stack_depth()\n#define json_log_and_abort(...) \\\n do { pg_log_fatal(__VA_ARGS__); exit(1); } while(0)\n#else\n#define json_log_and_abort(...) elog(ERROR, __VA_ARGS__)\n#endif\n\nThis has been mentioned here:\nhttps://www.postgresql.org/message-id/YNfXpFeBVfU2HsVe@paquier.xyz\n\nThis requires any tools in the frontend to use pg_logging_init(),\nwhich is recommended, but not enforced. Perhaps that's fine in\nitself to require frontends to register to the central logging APIs,\nbut json_log_and_abort() gets only called when dealing with incorrect\nerror codes even if we rely on JsonParseErrorType in all the places\ndoing error handling with the JSON parsing. And requiring a\ndependency on logging just for unlikely-to-happen cases seems a bit\ncrazy to me.\n\nAttached is a suggestion of patch to rework that a bit. Some extra\nelog()s could be added for the backend, as well as a new error code to\nuse as default of report_parse_error(), but that does not seem to gain\nmuch. And this item looks independent of switching this code to use\npqexpbuffer.h to be more portable with issues like OOM problems.\n\nThoughts?\n--\nMichael", "msg_date": "Wed, 30 Jun 2021 15:15:10 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Dependency to logging in jsonapi.c" }, { "msg_contents": "Michael Paquier <michael@paquier.xyz> writes:\n> Attached is a suggestion of patch to rework that a bit. Some extra\n> elog()s could be added for the backend, as well as a new error code to\n> use as default of report_parse_error(), but that does not seem to gain\n> much. 
And this item looks independent of switching this code to use\n> pqexpbuffer.h to be more portable with issues like OOM problems.\n\n> Thoughts?\n\n+1 in general, but I think I'd replace the one in report_parse_error\nwith \"Assert(false)\", rather than just dropping it.\n\nIt does not look to me like json_errdetail can sensibly be used in\nfrontend, since it returns palloc'd strings in some paths and\nconstants in others. There'd be no way to avoid a memory leak\nin a frontend usage. So I think the dependency on psprintf there\nis not really a problem, but maybe we should make the entire function\n\"#ifndef FRONTEND\" to clarify the intended usage and avoid building\nuseless code into clients.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 11:03:12 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Dependency to logging in jsonapi.c" }, { "msg_contents": "On Wed, 2021-06-30 at 11:03 -0400, Tom Lane wrote:\r\n> It does not look to me like json_errdetail can sensibly be used in\r\n> frontend, since it returns palloc'd strings in some paths and\r\n> constants in others. There'd be no way to avoid a memory leak\r\n> in a frontend usage. So I think the dependency on psprintf there\r\n> is not really a problem, but maybe we should make the entire function\r\n> \"#ifndef FRONTEND\" to clarify the intended usage and avoid building\r\n> useless code into clients.\r\n\r\nFWIW this is one of the fixes (patch 0002) in the JSON-for-libpq thread\r\n[1]. It ensures that all returned error strings are freeable by the\r\ncaller. 
That in turn was the impetus for the asprintf port suggestion.\r\n\r\nBut until/unless that is changed, an #ifndef seems like a good way to\r\nprevent issues for the current code.\r\n\r\n--Jacob\r\n\r\n[1] https://www.postgresql.org/message-id/flat/a250d475ba1c0cc0efb7dfec8e538fcc77cdcb8e.camel@vmware.com\r\n", "msg_date": "Wed, 30 Jun 2021 15:47:19 +0000", "msg_from": "Jacob Champion <pchampion@vmware.com>", "msg_from_op": false, "msg_subject": "Re: Dependency to logging in jsonapi.c" }, { "msg_contents": "On Wed, Jun 30, 2021 at 03:47:19PM +0000, Jacob Champion wrote:\n> On Wed, 2021-06-30 at 11:03 -0400, Tom Lane wrote:\n>> It does not look to me like json_errdetail can sensibly be used in\n>> frontend, since it returns palloc'd strings in some paths and\n>> constants in others. There'd be no way to avoid a memory leak\n>> in a frontend usage. So I think the dependency on psprintf there\n>> is not really a problem, but maybe we should make the entire function\n>> \"#ifndef FRONTEND\" to clarify the intended usage and avoid building\n>> useless code into clients.\n\nThat sounds sensible from here. One thing to be aware of is\njson_parse_manifest() in pg_verifybackup that uses it, but we could\njust replace the error by a plain \"failed to parse manifest\"\".\nBackup manifests are generated by the backend, so failures should not\nhappen there anyway.\n\n> FWIW this is one of the fixes (patch 0002) in the JSON-for-libpq thread\n> [1]. It ensures that all returned error strings are freeable by the\n> caller. That in turn was the impetus for the asprintf port suggestion.\n\nYes.\n\n> But until/unless that is changed, an #ifndef seems like a good way to\n> prevent issues for the current code.\n\nSounds sensible to do that as well for 14 before the release. 
Any\nthoughts about that?\n--\nMichael", "msg_date": "Thu, 1 Jul 2021 07:42:31 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Re: Dependency to logging in jsonapi.c" }, { "msg_contents": "Michael Paquier <michael@paquier.xyz> writes:\n> On Wed, Jun 30, 2021 at 03:47:19PM +0000, Jacob Champion wrote:\n>> But until/unless that is changed, an #ifndef seems like a good way to\n>> prevent issues for the current code.\n\n> Sounds sensible to do that as well for 14 before the release. Any\n> thoughts about that?\n\nIf this code were new in v14, I'd be +1, but it looks like it was\nthere in 13 too. So maybe there's somebody external depending on\nit, which would make it a bit unfriendly to remove it post-beta.\nLet's just add the #ifndef in HEAD.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 19:00:31 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Dependency to logging in jsonapi.c" }, { "msg_contents": "On Wed, Jun 30, 2021 at 07:00:31PM -0400, Tom Lane wrote:\n> If this code were new in v14, I'd be +1, but it looks like it was\n> there in 13 too. So maybe there's somebody external depending on\n> it, which would make it a bit unfriendly to remove it post-beta.\n> Let's just add the #ifndef in HEAD.\n\nRight, I needed more caffeine at this point in time. I have cleaned\nup that on HEAD, adding an assert at the end of report_parse_error()\nas you suggested.\n--\nMichael", "msg_date": "Fri, 2 Jul 2021 10:31:11 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": true, "msg_subject": "Re: Dependency to logging in jsonapi.c" } ]
[ { "msg_contents": "For PITR, getRecordTimestamp() did not include all record types that\ncontain times.\nAdd handling for checkpoints, end of recovery and prepared xact record types.\nBased on earlier discussions with community members.\n\nAlso, allow the option of recovery_target_use_origin_time = off (default) | on.\nThis allows PITR to consider whether it should use the local server\ntime of changes, or whether it should use the origin time on each\nnode. This is useful in multi-node data recovery.\n\nThis is part of a series of enhancements to PITR, in no specific order.\n\nPasses make check and recovery testing; includes docs.\n\n-- \nSimon Riggs http://www.EnterpriseDB.com/", "msg_date": "Wed, 30 Jun 2021 10:59:02 +0100", "msg_from": "Simon Riggs <simon.riggs@enterprisedb.com>", "msg_from_op": true, "msg_subject": "PITR: enhance getRecordTimestamp()" }, { "msg_contents": "> On 30 Jun 2021, at 11:59, Simon Riggs <simon.riggs@enterprisedb.com> wrote:\n\n> For PITR, getRecordTimestamp() did not include all record types that\n> contain times.\n> Add handling for checkpoints, end of recovery and prepared xact record types.\n\n+ <variablelist>\nThis breaks doc compilation, and looks like a stray tag as you want this entry\nin the currently open variablelist?\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Wed, 3 Nov 2021 14:28:41 +0100", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: PITR: enhance getRecordTimestamp()" }, { "msg_contents": "On Wed, 3 Nov 2021 at 13:28, Daniel Gustafsson <daniel@yesql.se> wrote:\n>\n> > On 30 Jun 2021, at 11:59, Simon Riggs <simon.riggs@enterprisedb.com> wrote:\n>\n> > For PITR, getRecordTimestamp() did not include all record types that\n> > contain times.\n> > Add handling for checkpoints, end of recovery and prepared xact record types.\n>\n> + <variablelist>\n> This breaks doc compilation, and looks like a stray tag as you want this entry\n> in the currently 
open variablelist?\n\nThanks. Fixed and rebased.\n\n-- \nSimon Riggs http://www.EnterpriseDB.com/", "msg_date": "Wed, 3 Nov 2021 16:59:04 +0000", "msg_from": "Simon Riggs <simon.riggs@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Re: PITR: enhance getRecordTimestamp()" }, { "msg_contents": "On Wed, Nov 03, 2021 at 04:59:04PM +0000, Simon Riggs wrote:\n> Thanks. Fixed and rebased.\n\n+ if (xact_info == XLOG_XACT_PREPARE)\n+ {\n+ if (recoveryTargetUseOriginTime)\n+ {\n+ xl_xact_prepare *xlrec = (xl_xact_prepare *) XLogRecGetData(record);\n+ xl_xact_parsed_prepare parsed;\n+\n+ ParsePrepareRecord(XLogRecGetInfo(record),\n+ xlrec,\n+ &parsed);\n+ *recordXtime = parsed.origin_timestamp;\n+ }\n+ else\n+ *recordXtime = ((xl_xact_prepare *) XLogRecGetData(record))->prepared_at;\n\nAs I learnt recently with ece8c76, there are cases where an origin\ntimestamp may not be set in the WAL record that includes the origin\ntimestamp depending on the setup done on the origin cluster. Isn't\nthis code going to finish by returning true when enabling\nrecovery_target_use_origin_time in some cases, even if recordXtime is\n0? So it seems to me that this is lacking some sanity checks if\nrecordXtime is 0.\n\nCould you add some tests for this proposal? This adds various PITR\nscenarios that would be uncovered, and TAP should be able to cover\nthat.\n--\nMichael", "msg_date": "Thu, 27 Jan 2022 15:58:17 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: PITR: enhance getRecordTimestamp()" }, { "msg_contents": "On Thu, 27 Jan 2022 at 06:58, Michael Paquier <michael@paquier.xyz> wrote:\n>\n> On Wed, Nov 03, 2021 at 04:59:04PM +0000, Simon Riggs wrote:\n> > Thanks. 
Fixed and rebased.\n>\n> + if (xact_info == XLOG_XACT_PREPARE)\n> + {\n> + if (recoveryTargetUseOriginTime)\n> + {\n> + xl_xact_prepare *xlrec = (xl_xact_prepare *) XLogRecGetData(record);\n> + xl_xact_parsed_prepare parsed;\n> +\n> + ParsePrepareRecord(XLogRecGetInfo(record),\n> + xlrec,\n> + &parsed);\n> + *recordXtime = parsed.origin_timestamp;\n> + }\n> + else\n> + *recordXtime = ((xl_xact_prepare *) XLogRecGetData(record))->prepared_at;\n>\n> As I learnt recently with ece8c76, there are cases where an origin\n> timestamp may not be set in the WAL record that includes the origin\n> timestamp depending on the setup done on the origin cluster. Isn't\n> this code going to finish by returning true when enabling\n> recovery_target_use_origin_time in some cases, even if recordXtime is\n> 0? So it seems to me that this is lacking some sanity checks if\n> recordXtime is 0.\n>\n> Could you add some tests for this proposal? This adds various PITR\n> scenarios that would be uncovered, and TAP should be able to cover\n> that.\n\nThanks. Yes, will look at that.\n\n-- \nSimon Riggs http://www.EnterpriseDB.com/\n\n\n", "msg_date": "Mon, 31 Jan 2022 19:11:39 +0000", "msg_from": "Simon Riggs <simon.riggs@enterprisedb.com>", "msg_from_op": true, "msg_subject": "Re: PITR: enhance getRecordTimestamp()" } ]
[ { "msg_contents": "Hi all,\n\nThe next commit fest is going to begin soon.\nI would like to volunteer as commit fest manager for 2021-07 if the\nrole is not filled and there are no objections.\n\nRegards,\nVignesh\n\n\n", "msg_date": "Wed, 30 Jun 2021 19:01:03 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Commit fest manager" }, { "msg_contents": "> On 30 Jun 2021, at 15:31, vignesh C <vignesh21@gmail.com> wrote:\n\n> The next commit fest is going to begin soon.\n> I would like to volunteer as commit fest manager for 2021-07 if the\n> role is not filled and there are no objections.\n\nIbrar Ahmed has already volunteered since a while back, so let's see if he is\nstill keen to take it on:\n\nhttps://postgr.es/m/CALtqXTcGbFEUwg-8MgmsAojP8Te61_QaeDJbneXjBybeeyZkRg@mail.gmail.com\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Wed, 30 Jun 2021 15:33:34 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "Hi,\nYes, I want to do that for sure.\n\n\nOn Wed, Jun 30, 2021 at 6:33 PM Daniel Gustafsson <daniel@yesql.se> wrote:\n\n> > On 30 Jun 2021, at 15:31, vignesh C <vignesh21@gmail.com> wrote:\n>\n> > The next commit fest is going to begin soon.\n> > I would like to volunteer as commit fest manager for 2021-07 if the\n> > role is not filled and there are no objections.\n>\n> Ibrar Ahmed has already volunteered since a while back, so let's see if he\n> is\n> still keen to take it on:\n>\n>\n> https://postgr.es/m/CALtqXTcGbFEUwg-8MgmsAojP8Te61_QaeDJbneXjBybeeyZkRg@mail.gmail.com\n>\n> --\n> Daniel Gustafsson https://vmware.com/\n>\n>\n\n-- \nIbrar Ahmed\n\nHi,Yes, I want to do that for sure.On Wed, Jun 30, 2021 at 6:33 PM Daniel Gustafsson <daniel@yesql.se> wrote:> On 30 Jun 2021, at 15:31, vignesh C <vignesh21@gmail.com> wrote:\n\n> The next commit fest is going to begin soon.\n> I would like to volunteer as 
commit fest manager for 2021-07 if the\n> role is not filled and there are no objections.\n\nIbrar Ahmed has already volunteered since a while back, so let's see if he is\nstill keen to take it on:\n\nhttps://postgr.es/m/CALtqXTcGbFEUwg-8MgmsAojP8Te61_QaeDJbneXjBybeeyZkRg@mail.gmail.com\n\n--\nDaniel Gustafsson               https://vmware.com/\n\n-- Ibrar Ahmed", "msg_date": "Thu, 1 Jul 2021 00:26:55 +0500", "msg_from": "Ibrar Ahmed <ibrar.ahmad@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Thu, Jul 1, 2021 at 12:26:55AM +0500, Ibrar Ahmed wrote:\n> Hi,\n> Yes, I want to do that for sure.\n\nShould we reserve the next commit fest for vignesh C?\n\n---------------------------------------------------------------------------\n> \n> On Wed, Jun 30, 2021 at 6:33 PM Daniel Gustafsson <daniel@yesql.se> wrote:\n> \n> > On 30 Jun 2021, at 15:31, vignesh C <vignesh21@gmail.com> wrote:\n> \n> > The next commit fest is going to begin soon.\n> > I would like to volunteer as commit fest manager for 2021-07 if the\n> > role is not filled and there are no objections.\n> \n> Ibrar Ahmed has already volunteered since a while back, so let's see if he\n> is\n> still keen to take it on:\n> \n> https://postgr.es/m/\n> CALtqXTcGbFEUwg-8MgmsAojP8Te61_QaeDJbneXjBybeeyZkRg@mail.gmail.com\n> \n> --\n> Daniel Gustafsson               https://vmware.com/\n> \n> \n> \n> \n> --\n> Ibrar Ahmed\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Thu, 1 Jul 2021 21:29:01 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Thu, Jul 01, 2021 at 09:29:01PM -0400, Bruce Momjian wrote:\n> Should we reserve the next commit fest for vignesh C?\n\nIf Vignesh is willing to help here, I'd say yes to that.\n--\nMichael", "msg_date": "Fri, 
2 Jul 2021 10:37:45 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Thu, Jul 01, 2021 at 12:26:55AM +0500, Ibrar Ahmed wrote:\n> Yes, I want to do that for sure.\n\nThanks. Please note that I have switched the CF as \"In Progress\" in\nthe app, and added a couple of future CFs while on it to cover the\ndevelopment cycle for 15.\n--\nMichael", "msg_date": "Fri, 2 Jul 2021 10:39:25 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Fri, Jul 2, 2021 at 7:08 AM Michael Paquier <michael@paquier.xyz> wrote:\n>\n> On Thu, Jul 01, 2021 at 09:29:01PM -0400, Bruce Momjian wrote:\n> > Should we reserve the next commit fest for vignesh C?\n>\n> If Vignesh is willing to help here, I'd say yes to that.\n\nI'm interested in assisting Ibrar Ahmed.\n\nRegards,\nVignesh\n\n\n", "msg_date": "Fri, 2 Jul 2021 08:33:44 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Fri, 2 Jul 2021 at 15:04, vignesh C <vignesh21@gmail.com> wrote:\n> I'm interested in assisting Ibrar Ahmed.\n\nIt might be worth talking to Ibrar to see where you can lend a hand. I\nthink in terms of the number of patches, this might be our biggest\ncommitfest yet.\n\n2020-07 246\n2020-09 235\n2020-11 244\n2021-01 260\n2020-03 295\n2020-07 342\n\nIt's possible Ibrar would welcome you helping to take care of some of\nthe duties. 
I've never been brave enough to take on the CF manager\nrole yet, but from what I can see, to do a good job takes a huge\namount of effort.\n\nDavid\n\n\n", "msg_date": "Fri, 2 Jul 2021 20:47:05 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Fri, 2 Jul 2021 at 1:47 PM, David Rowley <dgrowleyml@gmail.com> wrote:\n\n> On Fri, 2 Jul 2021 at 15:04, vignesh C <vignesh21@gmail.com> wrote:\n> > I'm interested in assisting Ibrar Ahmed.\n>\n> It might be worth talking to Ibrar to see where you can lend a hand. I\n> think in terms of the number of patches, this might be our biggest\n> commitfest yet.\n>\n> 2020-07 246\n> 2020-09 235\n> 2020-11 244\n> 2021-01 260\n> 2020-03 295\n> 2020-07 342\n>\n> It's possible Ibrar would welcome you helping to take care of some of\n> the duties. I've never been brave enough to take on the CF manager\n> role yet, but from what I can see, to do a good job takes a huge\n> amount of effort.\n>\n> David\n\n\nI am willing to take the responsibility, help from vegnsh is welcome\n\n>\n> --\nIbrar Ahmed\n\nOn Fri, 2 Jul 2021 at 1:47 PM, David Rowley <dgrowleyml@gmail.com> wrote:On Fri, 2 Jul 2021 at 15:04, vignesh C <vignesh21@gmail.com> wrote:\n> I'm interested in assisting Ibrar Ahmed.\n\nIt might be worth talking to Ibrar to see where you can lend a hand. I\nthink in terms of the number of patches, this might be our biggest\ncommitfest yet.\n\n2020-07 246\n2020-09 235\n2020-11 244\n2021-01 260\n2020-03 295\n2020-07 342\n\nIt's possible Ibrar would welcome you helping to take care of some of\nthe duties.  
I've never been brave enough to take on the CF manager\nrole yet, but from what I can see, to do a good job takes a huge\namount of effort.\n\nDavidI am willing to take the responsibility, help from vegnsh is welcome\n-- Ibrar Ahmed", "msg_date": "Fri, 2 Jul 2021 17:35:00 +0500", "msg_from": "Ibrar Ahmed <ibrar.ahmad@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Fri, Jul 2, 2021 at 6:05 PM Ibrar Ahmed <ibrar.ahmad@gmail.com> wrote:\n>\n>\n>\n> On Fri, 2 Jul 2021 at 1:47 PM, David Rowley <dgrowleyml@gmail.com> wrote:\n>>\n>> On Fri, 2 Jul 2021 at 15:04, vignesh C <vignesh21@gmail.com> wrote:\n>> > I'm interested in assisting Ibrar Ahmed.\n>>\n>> It might be worth talking to Ibrar to see where you can lend a hand. I\n>> think in terms of the number of patches, this might be our biggest\n>> commitfest yet.\n>>\n>> 2020-07 246\n>> 2020-09 235\n>> 2020-11 244\n>> 2021-01 260\n>> 2020-03 295\n>> 2020-07 342\n>>\n>> It's possible Ibrar would welcome you helping to take care of some of\n>> the duties. I've never been brave enough to take on the CF manager\n>> role yet, but from what I can see, to do a good job takes a huge\n>> amount of effort.\n>>\n>> David\n>\n>\n> I am willing to take the responsibility, help from vegnsh is welcome\n\nThanks, Can someone provide me permissions as this will be my first\ntime. 
My username is vignesh.postgres.\n\nRegards,\nVignesh\n\n\n", "msg_date": "Fri, 2 Jul 2021 19:35:55 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Fri, 2 Jul 2021 at 7:06 PM, vignesh C <vignesh21@gmail.com> wrote:\n\n> On Fri, Jul 2, 2021 at 6:05 PM Ibrar Ahmed <ibrar.ahmad@gmail.com> wrote:\n> >\n> >\n> >\n> > On Fri, 2 Jul 2021 at 1:47 PM, David Rowley <dgrowleyml@gmail.com>\n> wrote:\n> >>\n> >> On Fri, 2 Jul 2021 at 15:04, vignesh C <vignesh21@gmail.com> wrote:\n> >> > I'm interested in assisting Ibrar Ahmed.\n> >>\n> >> It might be worth talking to Ibrar to see where you can lend a hand. I\n> >> think in terms of the number of patches, this might be our biggest\n> >> commitfest yet.\n> >>\n> >> 2020-07 246\n> >> 2020-09 235\n> >> 2020-11 244\n> >> 2021-01 260\n> >> 2020-03 295\n> >> 2020-07 342\n> >>\n> >> It's possible Ibrar would welcome you helping to take care of some of\n> >> the duties. I've never been brave enough to take on the CF manager\n> >> role yet, but from what I can see, to do a good job takes a huge\n> >> amount of effort.\n> >>\n> >> David\n> >\n> >\n> > I am willing to take the responsibility, help from vegnsh is welcome\n>\n> Thanks, Can someone provide me permissions as this will be my first\n> time. My username is vignesh.postgres.\n>\n> Regards,\n> Vignesh\n\ni need permission my id is ibrar.ahmad@gmail.com\n\n>\n> --\nIbrar Ahmed\n\nOn Fri, 2 Jul 2021 at 7:06 PM, vignesh C <vignesh21@gmail.com> wrote:On Fri, Jul 2, 2021 at 6:05 PM Ibrar Ahmed <ibrar.ahmad@gmail.com> wrote:\n>\n>\n>\n> On Fri, 2 Jul 2021 at 1:47 PM, David Rowley <dgrowleyml@gmail.com> wrote:\n>>\n>> On Fri, 2 Jul 2021 at 15:04, vignesh C <vignesh21@gmail.com> wrote:\n>> > I'm interested in assisting Ibrar Ahmed.\n>>\n>> It might be worth talking to Ibrar to see where you can lend a hand. 
I\n>> think in terms of the number of patches, this might be our biggest\n>> commitfest yet.\n>>\n>> 2020-07 246\n>> 2020-09 235\n>> 2020-11 244\n>> 2021-01 260\n>> 2020-03 295\n>> 2020-07 342\n>>\n>> It's possible Ibrar would welcome you helping to take care of some of\n>> the duties.  I've never been brave enough to take on the CF manager\n>> role yet, but from what I can see, to do a good job takes a huge\n>> amount of effort.\n>>\n>> David\n>\n>\n> I am willing to take the responsibility, help from vegnsh is welcome\n\nThanks, Can someone provide me permissions as this will be my first\ntime. My username is vignesh.postgres.\n\nRegards,\nVigneshi need permission my id is ibrar.ahmad@gmail.com\n-- Ibrar Ahmed", "msg_date": "Fri, 2 Jul 2021 19:15:02 +0500", "msg_from": "Ibrar Ahmed <ibrar.ahmad@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Fri, Jul 2, 2021 at 7:15 PM Ibrar Ahmed <ibrar.ahmad@gmail.com> wrote:\n\n>\n>\n> On Fri, 2 Jul 2021 at 7:06 PM, vignesh C <vignesh21@gmail.com> wrote:\n>\n>> On Fri, Jul 2, 2021 at 6:05 PM Ibrar Ahmed <ibrar.ahmad@gmail.com> wrote:\n>> >\n>> >\n>> >\n>> > On Fri, 2 Jul 2021 at 1:47 PM, David Rowley <dgrowleyml@gmail.com>\n>> wrote:\n>> >>\n>> >> On Fri, 2 Jul 2021 at 15:04, vignesh C <vignesh21@gmail.com> wrote:\n>> >> > I'm interested in assisting Ibrar Ahmed.\n>> >>\n>> >> It might be worth talking to Ibrar to see where you can lend a hand. I\n>> >> think in terms of the number of patches, this might be our biggest\n>> >> commitfest yet.\n>> >>\n>> >> 2020-07 246\n>> >> 2020-09 235\n>> >> 2020-11 244\n>> >> 2021-01 260\n>> >> 2020-03 295\n>> >> 2020-07 342\n>> >>\n>> >> It's possible Ibrar would welcome you helping to take care of some of\n>> >> the duties. 
I've never been brave enough to take on the CF manager\n>> >> role yet, but from what I can see, to do a good job takes a huge\n>> >> amount of effort.\n>> >>\n>> >> David\n>> >\n>> >\n>> > I am willing to take the responsibility, help from vegnsh is welcome\n>>\n>> Thanks, Can someone provide me permissions as this will be my first\n>> time. My username is vignesh.postgres.\n>>\n>> Regards,\n>> Vignesh\n>\n> i need permission my id is ibrar.ahmad@gmail.com\n>\n>>\n>> --\n> Ibrar Ahmed\n>\n\n\nAny update and decision on this? so I can start working on this.\n\n-- \nIbrar Ahmed\n\nOn Fri, Jul 2, 2021 at 7:15 PM Ibrar Ahmed <ibrar.ahmad@gmail.com> wrote:On Fri, 2 Jul 2021 at 7:06 PM, vignesh C <vignesh21@gmail.com> wrote:On Fri, Jul 2, 2021 at 6:05 PM Ibrar Ahmed <ibrar.ahmad@gmail.com> wrote:\n>\n>\n>\n> On Fri, 2 Jul 2021 at 1:47 PM, David Rowley <dgrowleyml@gmail.com> wrote:\n>>\n>> On Fri, 2 Jul 2021 at 15:04, vignesh C <vignesh21@gmail.com> wrote:\n>> > I'm interested in assisting Ibrar Ahmed.\n>>\n>> It might be worth talking to Ibrar to see where you can lend a hand. I\n>> think in terms of the number of patches, this might be our biggest\n>> commitfest yet.\n>>\n>> 2020-07 246\n>> 2020-09 235\n>> 2020-11 244\n>> 2021-01 260\n>> 2020-03 295\n>> 2020-07 342\n>>\n>> It's possible Ibrar would welcome you helping to take care of some of\n>> the duties.  I've never been brave enough to take on the CF manager\n>> role yet, but from what I can see, to do a good job takes a huge\n>> amount of effort.\n>>\n>> David\n>\n>\n> I am willing to take the responsibility, help from vegnsh is welcome\n\nThanks, Can someone provide me permissions as this will be my first\ntime. My username is vignesh.postgres.\n\nRegards,\nVigneshi need permission my id is ibrar.ahmad@gmail.com\n-- Ibrar Ahmed\nAny update and decision on this? 
so I can start working on this.-- Ibrar Ahmed", "msg_date": "Tue, 6 Jul 2021 15:38:23 +0500", "msg_from": "Ibrar Ahmed <ibrar.ahmad@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Tue, Jul 06, 2021 at 03:38:23PM +0500, Ibrar Ahmed wrote:\n> Any update and decision on this? so I can start working on this.\n\nWorking on the CF does not strongly require the admin permissions. I\nhave already switched the current CF as in progress, so most of the\nadmin job is done for this month :)\n\nAnother piece of interest are the reviewer/author stat reports, but\npoking at patching does not need this information in most cases.\n--\nMichael", "msg_date": "Tue, 6 Jul 2021 19:58:47 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" }, { "msg_contents": "On Tue, Jul 6, 2021 at 3:58 PM Michael Paquier <michael@paquier.xyz> wrote:\n\n> On Tue, Jul 06, 2021 at 03:38:23PM +0500, Ibrar Ahmed wrote:\n> > Any update and decision on this? so I can start working on this.\n>\n> Working on the CF does not strongly require the admin permissions. I\n> have already switched the current CF as in progress, so most of the\n> admin job is done for this month :)\n>\n> Another piece of interest are the reviewer/author stat reports, but\n> poking at patching does not need this information in most cases.\n> --\n> Michael\n>\n\nGreat, thanks Michael, I will start doing my work :)\n\n-- \nIbrar Ahmed\n\nOn Tue, Jul 6, 2021 at 3:58 PM Michael Paquier <michael@paquier.xyz> wrote:On Tue, Jul 06, 2021 at 03:38:23PM +0500, Ibrar Ahmed wrote:\n> Any update and decision on this? so I can start working on this.\n\nWorking on the CF does not strongly require the admin permissions.  
I\nhave already switched the current CF as in progress, so most of the\nadmin job is done for this month :)\n\nAnother piece of interest are the reviewer/author stat reports, but\npoking at patching does not need this information in most cases.\n--\nMichael\nGreat, thanks Michael, I will start doing my work :)-- Ibrar Ahmed", "msg_date": "Tue, 6 Jul 2021 16:25:47 +0500", "msg_from": "Ibrar Ahmed <ibrar.ahmad@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Commit fest manager" } ]
[ { "msg_contents": "Hi,\n\nLast year in [1], I had briefly mentioned $subject. I'm starting this\nthread to propose a small patch to alleviate the inefficiency of that\ncase.\n\nAs also mentioned in [1], when running -Mprepared benchmarks\n(plan_cache_mode = force_generic_plan) using partitioned tables,\nExecRTCheckPerms() tends to show up in the profile, especially with\nlarge partition counts. Granted it's lurking behind\nAcquireExecutorLocks(), LockReleaseAll() et al, but still seems like a\nproblem we should do something about.\n\nThe problem is that it loops over the entire range table even though\nonly one or handful of those entries actually need their permissions\nchecked. Most entries, especially those of partition child tables\nhave their requiredPerms set to 0, which David pointed out to me in\n[2], so what ExecCheckRTPerms() does in their case is pure overhead.\n\nAn idea to fix that is to store the RT indexes of the entries that\nhave non-0 requiredPerms into a separate list or a bitmapset in\nPlannedStmt. I thought of two implementation ideas for how to set\nthat:\n\n1. Put add_rtes_to_flat_rtable() in the charge of populating it:\n\n@@ -324,12 +324,18 @@ add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing)\n * flattened rangetable match up with their original indexes. When\n * recursing, we only care about extracting relation RTEs.\n */\n+ rti = 1;\n foreach(lc, root->parse->rtable)\n {\n RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);\n\n if (!recursing || rte->rtekind == RTE_RELATION)\n+ {\n add_rte_to_flat_rtable(glob, rte);\n+ if (rte->requiredPerms != 0)\n+ glob->checkPermRels = bms_add_member(glob->checkPermRels, rti);\n+ }\n+ rti++\n }\n\n2. Start populating checkPermRels in ParseState (parse_relation.c),\npassing it along in Query through the rewriter and finally the\nplanner.\n\n1 seems very simple, but appears to add overhead to what is likely a\noft-taken path. 
Also, the newly added code would have to run as many\ntimes as there are partitions, which sounds like a dealbreaker to me.\n\n2 can seem a bit complex. Given that the set is tracked in Query,\nspecial care is needed to handle views and subqueries correctly,\nbecause those features involve intricate manipulation of Query nodes\nand their range tables. However, most of that special care code\nremains out of the busy paths. Also, none of that code touches\npartition/child RTEs, so unaffected by how many of them there are.\n\nFor now, I have implemented the idea 2 as the attached patch. While\nit passes make check-world, I am not fully confident yet that it\ncorrectly handles all the cases involving views and subqueries.\n\nSo while still kind of PoC, will add this to July CF for keeping track.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n[1] https://www.postgresql.org/message-id/CA+HiwqG7ZruBmmih3wPsBZ4s0H2EhywrnXEduckY5Hr3fWzPWA@mail.gmail.com\n[2] https://www.postgresql.org/message-id/CAApHDvqPzsMcKLRpmNpUW97PmaQDTmD7b2BayEPS5AN4LY-0bA%40mail.gmail.com", "msg_date": "Wed, 30 Jun 2021 22:33:44 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Thu, 1 Jul 2021 at 01:34, Amit Langote <amitlangote09@gmail.com> wrote:\n> For now, I have implemented the idea 2 as the attached patch.\n\nI only just had a fleeting glance at the patch. 
Aren't you\naccidentally missing the 0th RTE here?\n\n+ while ((rti = bms_next_member(checkPermRels, rti)) > 0)\n {\n- RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);\n+ RangeTblEntry *rte = (RangeTblEntry *) list_nth(rangeTable, rti - 1);\n\nI'd have expected >= 0 rather than > 0.\n\nDavid\n\n\n", "msg_date": "Thu, 1 Jul 2021 02:33:46 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Jun 30, 2021 at 23:34 David Rowley <dgrowleyml@gmail.com> wrote:\n\n> On Thu, 1 Jul 2021 at 01:34, Amit Langote <amitlangote09@gmail.com> wrote:\n> > For now, I have implemented the idea 2 as the attached patch.\n>\n> I only just had a fleeting glance at the patch. Aren't you\n> accidentally missing the 0th RTE here?\n>\n> + while ((rti = bms_next_member(checkPermRels, rti)) > 0)\n> {\n> - RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);\n> + RangeTblEntry *rte = (RangeTblEntry *) list_nth(rangeTable, rti - 1);\n>\n> I'd have expected >= 0 rather than > 0.\n\n\nHmm, a valid RT index cannot be 0, so that seems fine to me. Note that RT\nindexes are added as-is to that bitmapset, not after subtracting 1.\n\n> --\nAmit Langote\nEDB: http://www.enterprisedb.com\n\nOn Wed, Jun 30, 2021 at 23:34 David Rowley <dgrowleyml@gmail.com> wrote:On Thu, 1 Jul 2021 at 01:34, Amit Langote <amitlangote09@gmail.com> wrote:\n> For now, I have implemented the idea 2 as the attached patch.\n\nI only just had a fleeting glance at the patch. Aren't you\naccidentally missing the 0th RTE here?\n\n+ while ((rti = bms_next_member(checkPermRels, rti)) > 0)\n  {\n- RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);\n+ RangeTblEntry *rte = (RangeTblEntry *) list_nth(rangeTable, rti - 1);\n\nI'd have expected >= 0 rather than > 0.Hmm, a valid RT index cannot be 0, so that seems fine to me.  
Note that RT indexes are added as-is to that bitmapset, not after subtracting 1.-- Amit LangoteEDB: http://www.enterprisedb.com", "msg_date": "Wed, 30 Jun 2021 23:58:23 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Thu, 1 Jul 2021 at 02:58, Amit Langote <amitlangote09@gmail.com> wrote:\n>\n> On Wed, Jun 30, 2021 at 23:34 David Rowley <dgrowleyml@gmail.com> wrote:\n>> + while ((rti = bms_next_member(checkPermRels, rti)) > 0)\n>> {\n>> - RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);\n>> + RangeTblEntry *rte = (RangeTblEntry *) list_nth(rangeTable, rti - 1);\n>>\n>> I'd have expected >= 0 rather than > 0.\n>\n> Hmm, a valid RT index cannot be 0, so that seems fine to me. Note that RT indexes are added as-is to that bitmapset, not after subtracting 1.\n\nOh, you're right. My mistake.\n\nDavid\n\n\n", "msg_date": "Thu, 1 Jul 2021 10:51:51 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Amit Langote <amitlangote09@gmail.com> writes:\n> The problem is that it loops over the entire range table even though\n> only one or handful of those entries actually need their permissions\n> checked. 
Most entries, especially those of partition child tables\n> have their requiredPerms set to 0, which David pointed out to me in\n> [2], so what ExecCheckRTPerms() does in their case is pure overhead.\n\n> An idea to fix that is to store the RT indexes of the entries that\n> have non-0 requiredPerms into a separate list or a bitmapset in\n> PlannedStmt.\n\nI think perhaps we ought to be more ambitious than that, and consider\nseparating the list of permissions-to-check from the rtable entirely.\nYour patch hardly qualifies as non-invasive, plus it seems to invite\nerrors of omission, while if we changed the data structure altogether\nthen the compiler would help find any not-updated code.\n\nBut the main reason that this strikes me as possibly a good idea\nis that I was just taking another look at the complaint in [1],\nwhere I wrote\n\n>> I think it's impossible to avoid less-than-O(N^2) growth on this sort\n>> of case. For example, the v2 subquery initially has RTEs for v2 itself\n>> plus v1. When we flatten v1 into v2, v2 acquires the RTEs from v1,\n>> namely v1 itself plus foo. Similarly, once vK-1 is pulled up into vK,\n>> there are going to be order-of-K entries in vK's rtable, and that stacking\n>> makes for O(N^2) work overall just in manipulating the rtable.\n>> \n>> We can't get rid of these rtable entries altogether, since all of them\n>> represent table privilege checks that the executor will need to do.\n\nPerhaps, if we separated the rtable from the required-permissions data\nstructure, then we could avoid pulling up otherwise-useless RTEs when\nflattening a view (or even better, not make the extra RTEs in the\nfirst place??), and thus possibly avoid that exponential planning-time\ngrowth for nested views.\n\nOr maybe not. 
But I think we should take a hard look at whether\nseparating these data structures could solve both of these problems\nat once.\n\n\t\t\tregards, tom lane\n\n[1] https://www.postgresql.org/message-id/flat/797aff54-b49b-4914-9ff9-aa42564a4d7d%40www.fastmail.com\n\n\n", "msg_date": "Thu, 01 Jul 2021 11:45:03 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Jul 2, 2021 at 12:45 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Amit Langote <amitlangote09@gmail.com> writes:\n> > The problem is that it loops over the entire range table even though\n> > only one or handful of those entries actually need their permissions\n> > checked. Most entries, especially those of partition child tables\n> > have their requiredPerms set to 0, which David pointed out to me in\n> > [2], so what ExecCheckRTPerms() does in their case is pure overhead.\n>\n> > An idea to fix that is to store the RT indexes of the entries that\n> > have non-0 requiredPerms into a separate list or a bitmapset in\n> > PlannedStmt.\n>\n> I think perhaps we ought to be more ambitious than that, and consider\n> separating the list of permissions-to-check from the rtable entirely.\n> Your patch hardly qualifies as non-invasive, plus it seems to invite\n> errors of omission, while if we changed the data structure altogether\n> then the compiler would help find any not-updated code.\n>\n> But the main reason that this strikes me as possibly a good idea\n> is that I was just taking another look at the complaint in [1],\n> where I wrote\n>\n> >> I think it's impossible to avoid less-than-O(N^2) growth on this sort\n> >> of case. For example, the v2 subquery initially has RTEs for v2 itself\n> >> plus v1. When we flatten v1 into v2, v2 acquires the RTEs from v1,\n> >> namely v1 itself plus foo. 
Similarly, once vK-1 is pulled up into vK,\n> >> there are going to be order-of-K entries in vK's rtable, and that stacking\n> >> makes for O(N^2) work overall just in manipulating the rtable.\n> >>\n> >> We can't get rid of these rtable entries altogether, since all of them\n> >> represent table privilege checks that the executor will need to do.\n>\n> Perhaps, if we separated the rtable from the required-permissions data\n> structure, then we could avoid pulling up otherwise-useless RTEs when\n> flattening a view (or even better, not make the extra RTEs in the\n> first place??), and thus possibly avoid that exponential planning-time\n> growth for nested views.\n>\n> Or maybe not. But I think we should take a hard look at whether\n> separating these data structures could solve both of these problems\n> at once.\n\nAh, okay. I'll think about decoupling the permission checking stuff\nfrom the range table data structure.\n\nThanks for the feedback.\n\nI'll mark the CF entry as WoA, unless you'd rather I just mark it RwF.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 2 Jul 2021 09:40:47 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, 2 Jul 2021 at 12:41, Amit Langote <amitlangote09@gmail.com> wrote:\n> I'll mark the CF entry as WoA, unless you'd rather I just mark it RwF.\n\nI've set it to waiting on author. 
It was still set to needs review.\n\nIf you think you'll not get time to write the patch during this CF,\nfeel free to bump it out.\n\nDavid\n\n\n", "msg_date": "Wed, 7 Jul 2021 16:41:13 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Jul 7, 2021 at 1:41 PM David Rowley <dgrowleyml@gmail.com> wrote:\n> On Fri, 2 Jul 2021 at 12:41, Amit Langote <amitlangote09@gmail.com> wrote:\n> > I'll mark the CF entry as WoA, unless you'd rather I just mark it RwF.\n>\n> I've set it to waiting on author. It was still set to needs review.\n\nSorry it slipped my mind to do that and thanks.\n\n> If you think you'll not get time to write the patch during this CF,\n> feel free to bump it out.\n\nI will try to post an update next week if not later this week,\nhopefully with an updated patch.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 7 Jul 2021 16:12:49 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Jul 2, 2021 at 9:40 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Fri, Jul 2, 2021 at 12:45 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > I think perhaps we ought to be more ambitious than that, and consider\n> > separating the list of permissions-to-check from the rtable entirely.\n> > Your patch hardly qualifies as non-invasive, plus it seems to invite\n> > errors of omission, while if we changed the data structure altogether\n> > then the compiler would help find any not-updated code.\n> >\n> > But the main reason that this strikes me as possibly a good idea\n> > is that I was just taking another look at the complaint in [1],\n> > where I wrote\n> >\n> > >> I think it's impossible to avoid less-than-O(N^2) growth on this sort\n> > >> of case. 
For example, the v2 subquery initially has RTEs for v2 itself\n> > >> plus v1. When we flatten v1 into v2, v2 acquires the RTEs from v1,\n> > >> namely v1 itself plus foo. Similarly, once vK-1 is pulled up into vK,\n> > >> there are going to be order-of-K entries in vK's rtable, and that stacking\n> > >> makes for O(N^2) work overall just in manipulating the rtable.\n> > >>\n> > >> We can't get rid of these rtable entries altogether, since all of them\n> > >> represent table privilege checks that the executor will need to do.\n> >\n> > Perhaps, if we separated the rtable from the required-permissions data\n> > structure, then we could avoid pulling up otherwise-useless RTEs when\n> > flattening a view (or even better, not make the extra RTEs in the\n> > first place??), and thus possibly avoid that exponential planning-time\n> > growth for nested views.\n> >\n> > Or maybe not. But I think we should take a hard look at whether\n> > separating these data structures could solve both of these problems\n> > at once.\n>\n> Ah, okay. I'll think about decoupling the permission checking stuff\n> from the range table data structure.\n\nI have finished with the attached. Sorry about the delay.\n\nThink I've managed to get the first part done -- getting the\npermission-checking info out of the range table -- but have not\nseriously attempted the second -- doing away with the OLD/NEW range\ntable entries in the view/rule action queries, assuming that is what\nyou meant in the quoted.\n\nOne design point I think might need reconsidering is that the list of\nthe new RelPermissionInfo nodes that holds the permission-checking\ninfo for relations has to be looked up with a linear search using the\nrelation OID, whereas it was basically free before if a particular of\ncode had the RTE handy. 
Maybe I need to check if the overhead of that\nis noticeable in some cases.\n\nAs there's not much time left in this CF, I've bumped the entry to the next one.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Thu, 29 Jul 2021 17:40:00 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Thu, Jul 29, 2021 at 5:40 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Fri, Jul 2, 2021 at 9:40 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> > On Fri, Jul 2, 2021 at 12:45 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > > Perhaps, if we separated the rtable from the required-permissions data\n> > > structure, then we could avoid pulling up otherwise-useless RTEs when\n> > > flattening a view (or even better, not make the extra RTEs in the\n> > > first place??), and thus possibly avoid that exponential planning-time\n> > > growth for nested views.\n>\n> Think I've managed to get the first part done -- getting the\n> permission-checking info out of the range table -- but have not\n> seriously attempted the second -- doing away with the OLD/NEW range\n> table entries in the view/rule action queries, assuming that is what\n> you meant in the quoted.\n\nI took a stab at the 2nd part, implemented in the attached 0002.\n\nThe patch removes UpdateRangeTableOfViewParse() which would add the\ndummy OLD/NEW entries to a view rule's action query's rtable, citing\nthese reasons:\n\n- * These extra RT entries are not actually used in the query,\n- * except for run-time locking and permission checking.\n\n0001 makes them unnecessary for permission checking. 
Though, a\nRELATION-kind RTE still be must be present in the rtable for run-time\nlocking, so I adjusted ApplyRetrieveRule() as follows:\n\n@@ -1803,16 +1804,26 @@ ApplyRetrieveRule(Query *parsetree,\n * original RTE to a subquery RTE.\n */\n rte = rt_fetch(rt_index, parsetree->rtable);\n+ subquery_rte = rte;\n\n- rte->rtekind = RTE_SUBQUERY;\n- rte->subquery = rule_action;\n- rte->security_barrier = RelationIsSecurityView(relation);\n+ /*\n+ * Before modifying, store a copy of itself so as to serve as the entry\n+ * to be used by the executor to lock the view relation and for the\n+ * planner to be able to record the view relation OID in the PlannedStmt\n+ * that it produces for the query.\n+ */\n+ rte = copyObject(rte);\n+ parsetree->rtable = lappend(parsetree->rtable, rte);\n+\n+ subquery_rte->rtekind = RTE_SUBQUERY;\n+ subquery_rte->subquery = rule_action;\n+ subquery_rte->security_barrier = RelationIsSecurityView(relation);\n /* Clear fields that should not be set in a subquery RTE */\n- rte->relid = InvalidOid;\n- rte->relkind = 0;\n- rte->rellockmode = 0;\n- rte->tablesample = NULL;\n- rte->inh = false; /* must not be set for a subquery */\n+ subquery_rte->relid = InvalidOid;\n+ subquery_rte->relkind = 0;\n+ subquery_rte->rellockmode = 0;\n+ subquery_rte->tablesample = NULL;\n+ subquery_rte->inh = false; /* must not be set for a subquery */\n\n return parsetree;\n }\n\nOutputs for a bunch of regression tests needed to be adjusted to\naccount for that pg_get_viewdef() no longer qualifies view column\nnames in the deparsed queries, that is, if they reference only a\nsingle relation. Previously, those dummy OLD/NEW entries tricked\nmake_ruledef(), get_query_def() et al into setting\ndeparse_context.varprefix to true. 
contrib/postgre_fdw test output\nlikewise needed adjustment due to its deparse code being impacted by\nthose dummy entries no longer being present, I believe.\n\nI haven't yet checked how this further improves the performance for\nthe case discussed at [1] that prompted this.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n[1] https://www.postgresql.org/message-id/flat/797aff54-b49b-4914-9ff9-aa42564a4d7d%40www.fastmail.com", "msg_date": "Fri, 20 Aug 2021 22:46:17 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Aug 20, 2021 at 10:46 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Thu, Jul 29, 2021 at 5:40 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > On Fri, Jul 2, 2021 at 9:40 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > On Fri, Jul 2, 2021 at 12:45 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > > > Perhaps, if we separated the rtable from the required-permissions data\n> > > > structure, then we could avoid pulling up otherwise-useless RTEs when\n> > > > flattening a view (or even better, not make the extra RTEs in the\n> > > > first place??), and thus possibly avoid that exponential planning-time\n> > > > growth for nested views.\n> >\n> > Think I've managed to get the first part done -- getting the\n> > permission-checking info out of the range table -- but have not\n> > seriously attempted the second -- doing away with the OLD/NEW range\n> > table entries in the view/rule action queries, assuming that is what\n> > you meant in the quoted.\n>\n> I took a stab at the 2nd part, implemented in the attached 0002.\n>\n> The patch removes UpdateRangeTableOfViewParse() which would add the\n> dummy OLD/NEW entries to a view rule's action query's rtable\n>\n> I haven't yet checked how this further improves the performance for\n> the case discussed at [1] that prompted this.\n>\n> [1] 
https://www.postgresql.org/message-id/flat/797aff54-b49b-4914-9ff9-aa42564a4d7d%40www.fastmail.com\n\nI checked the time required to do explain select * from v512 (worst\ncase), using the setup described at the above link and I get the\nfollowing numbers:\n\nHEAD: 119.774 ms\n0001 : 129.802 ms\n0002 : 109.456 ms\n\nSo it appears that applying only 0001 makes things a bit worse for\nthis case. That seems to have to do with the following addition in\npull_up_simple_subquery():\n\n@@ -1131,6 +1131,9 @@ pull_up_simple_subquery(PlannerInfo *root, Node\n*jtnode, RangeTblEntry *rte,\n */\n parse->rtable = list_concat(parse->rtable, subquery->rtable);\n\n+ parse->relpermlist = MergeRelPermissionInfos(parse->relpermlist,\n+ subquery->relpermlist);\n+\n\nWhat it does is pull up the RelPermissionInfo nodes in the subquery\nbeing pulled up into the parent query and it's not a simple\nlist_concat(), because I decided that it's better to de-duplicate the\nentries for a given relation OID even across subqueries.\n\nThings get better than HEAD with 0002, because less work needs to be\ndone in the rewriter when copying the subqueries into the main query,\nespecially the range table, which only has 1 entry now, not 3 per\nview.\n\nAttached updated patches. I wrote a longer commit message for 0002 this time.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Thu, 26 Aug 2021 18:13:33 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Got this warning:\n\n/pgsql/source/master/contrib/postgres_fdw/postgres_fdw.c: In function 'GetResultRelCheckAsUser':\n/pgsql/source/master/contrib/postgres_fdw/postgres_fdw.c:1898:7: warning: unused variable 'result' [-Wunused-variable]\n Oid result;\n ^~~~~~\n\nI think the idea that GetRelPermissionInfo always has to scan the\ncomplete list by OID is a nonstarter. 
Maybe it would be possible to\nstore the list index of the PermissionInfo element in the RelOptInfo or\nthe RTE? Maybe use special negative values if unknown (it knows to\nsearch the first time) or known non-existant (probably a coding error\ncondition, maybe not necessary to have this)\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Mon, 6 Sep 2021 16:35:20 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Thanks Alvaro for taking a look at this.\n\nOn Tue, Sep 7, 2021 at 4:35 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> Got this warning:\n>\n> /pgsql/source/master/contrib/postgres_fdw/postgres_fdw.c: In function 'GetResultRelCheckAsUser':\n> /pgsql/source/master/contrib/postgres_fdw/postgres_fdw.c:1898:7: warning: unused variable 'result' [-Wunused-variable]\n> Oid result;\n> ^~~~~~\n\nFixed.\n\n> I think the idea that GetRelPermissionInfo always has to scan the\n> complete list by OID is a nonstarter. Maybe it would be possible to\n> store the list index of the PermissionInfo element in the RelOptInfo or\n> the RTE? Maybe use special negative values if unknown (it knows to\n> search the first time) or known non-existant (probably a coding error\n> condition, maybe not necessary to have this)\n\nI implemented this by adding an Index field in RangeTblEntry, because\nGetRelPermissionInfo() is used in all phases of query processing and\nonly RTEs exist from start to end. I did have to spend some time\ngetting that approach right (get `make check` to pass!), especially to\nensure that the indexes remain in sync during the merging of\nRelPermissionInfo across subqueries. The comments I wrote around\nGetRelPermissionInfo(), MergeRelPermissionInfos() functions should\nhopefully make things clear. 
Though, I do have a slightly uneasy\nfeeling around the fact that RTEs now store information that is\ncomputed using some non-trivial logic, whereas most other fields are\nsimple catalog state or trivial details extracted from how the query\nis spelled out by the user.\n\nI also noticed that setrefs.c: add_rtes_to_flat_rtable() was still\ndoing things -- adding dead subquery RTEs and any RTEs referenced in\nthe underlying subquery to flat rtable -- that the new approach of\npermissions handling makes unnecessary. I fixed that oversight in the\nupdated patch. A benefit from that simplification is that there is\nnow a single loop over rtable in that function rather than two that\nwere needed before.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 10 Sep 2021 12:22:30 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Sep 10, 2021 at 12:22 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Tue, Sep 7, 2021 at 4:35 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > I think the idea that GetRelPermissionInfo always has to scan the\n> > complete list by OID is a nonstarter. Maybe it would be possible to\n> > store the list index of the PermissionInfo element in the RelOptInfo or\n> > the RTE? Maybe use special negative values if unknown (it knows to\n> > search the first time) or known non-existant (probably a coding error\n> > condition, maybe not necessary to have this)\n>\n> I implemented this by adding an Index field in RangeTblEntry, because\n> GetRelPermissionInfo() is used in all phases of query processing and\n> only RTEs exist from start to end. I did have to spend some time\n> getting that approach right (get `make check` to pass!), especially to\n> ensure that the indexes remain in sync during the merging of\n> RelPermissionInfo across subqueries. 
The comments I wrote around\n> GetRelPermissionInfo(), MergeRelPermissionInfos() functions should\n> hopefully make things clear. Though, I do have a slightly uneasy\n> feeling around the fact that RTEs now store information that is\n> computed using some non-trivial logic, whereas most other fields are\n> simple catalog state or trivial details extracted from how the query\n> is spelled out by the user.\n>\n> I also noticed that setrefs.c: add_rtes_to_flat_rtable() was still\n> doing things -- adding dead subquery RTEs and any RTEs referenced in\n> the underlying subquery to flat rtable -- that the new approach of\n> permissions handling makes unnecessary. I fixed that oversight in the\n> updated patch. A benefit from that simplification is that there is\n> now a single loop over rtable in that function rather than two that\n> were needed before.\n\nPatch 0002 needed a rebase, because a conflicting change to\nexpected/rules.out has since been committed.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 20 Dec 2021 16:13:04 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hi,\n\nOn Mon, Dec 20, 2021 at 04:13:04PM +0900, Amit Langote wrote:\n> \n> Patch 0002 needed a rebase, because a conflicting change to\n> expected/rules.out has since been committed.\n\nThe cfbot reports new conflicts since about a week ago with this patch:\n\nLatest failure: https://cirrus-ci.com/task/4686414276198400 and\nhttps://api.cirrus-ci.com/v1/artifact/task/4686414276198400/regress_diffs/src/test/regress/regression.diffs\n\ndiff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/xml.out /tmp/cirrus-ci-build/src/test/regress/results/xml.out\n--- /tmp/cirrus-ci-build/src/test/regress/expected/xml.out\t2022-01-12 05:24:02.795477001 +0000\n+++ /tmp/cirrus-ci-build/src/test/regress/results/xml.out\t2022-01-12 05:28:20.329086031 +0000\n@@ -603,12 
+603,12 @@\n CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text);\n SELECT table_name, view_definition FROM information_schema.views\n WHERE table_name LIKE 'xmlview%' ORDER BY 1;\n- table_name | view_definition\n-------------+-------------------------------------------------------------------------------------------------------------------\n+ table_name | view_definition\n+------------+------------------------------------------------------------------------------------------------------------\n xmlview1 | SELECT xmlcomment('test'::text) AS xmlcomment;\n xmlview2 | SELECT XMLCONCAT('hello'::xml, 'you'::xml) AS \"xmlconcat\";\n xmlview3 | SELECT XMLELEMENT(NAME element, XMLATTRIBUTES(1 AS \":one:\", 'deuce' AS two), 'content&') AS \"xmlelement\";\n- xmlview4 | SELECT XMLELEMENT(NAME employee, XMLFOREST(emp.name AS name, emp.age AS age, emp.salary AS pay)) AS \"xmlelement\"+\n+ xmlview4 | SELECT XMLELEMENT(NAME employee, XMLFOREST(name AS name, age AS age, salary AS pay)) AS \"xmlelement\" +\n | FROM emp;\n xmlview5 | SELECT XMLPARSE(CONTENT '<abc>x</abc>'::text STRIP WHITESPACE) AS \"xmlparse\";\n xmlview6 | SELECT XMLPI(NAME foo, 'bar'::text) AS \"xmlpi\";\ndiff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/compression.out /tmp/cirrus-ci-build/src/test/regress/results/compression.out\n--- /tmp/cirrus-ci-build/src/test/regress/expected/compression.out\t2022-01-12 05:24:02.739471690 +0000\n+++ /tmp/cirrus-ci-build/src/test/regress/results/compression.out\t2022-01-12 05:28:23.537403929 +0000\n@@ -187,7 +187,7 @@\n --------+------+-----------+----------+---------+----------+-------------+--------------+-------------\n x | text | | | | extended | | |\n View definition:\n- SELECT cmdata1.f1 AS x\n+ SELECT f1 AS x\n FROM cmdata1;\n\n SELECT pg_column_compression(f1) FROM cmdata1;\n@@ -274,7 +274,7 @@\n --------+------+-----------+----------+---------+----------+-------------+--------------+-------------\n x | text | | | | extended | lz4 | |\n View 
definition:\n- SELECT cmdata1.f1 AS x\n+ SELECT f1 AS x\n FROM cmdata1;\n\nCould you send a rebased patch? In the meantime I'll switch the cf entry to\nWaiting on Author.\n\n\n", "msg_date": "Thu, 13 Jan 2022 11:10:05 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Thu, Jan 13, 2022 at 12:10 PM Julien Rouhaud <rjuju123@gmail.com> wrote:\n> On Mon, Dec 20, 2021 at 04:13:04PM +0900, Amit Langote wrote:\n> >\n> > Patch 0002 needed a rebase, because a conflicting change to\n> > expected/rules.out has since been committed.\n>\n> The cfbot reports new conflicts since about a week ago with this patch:\n\nI had noticed that too and was meaning to send a new version. Thanks\nfor the reminder.\n\n> Latest failure: https://cirrus-ci.com/task/4686414276198400 and\n> https://api.cirrus-ci.com/v1/artifact/task/4686414276198400/regress_diffs/src/test/regress/regression.diffs\n>\n> diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/xml.out /tmp/cirrus-ci-build/src/test/regress/results/xml.out\n> --- /tmp/cirrus-ci-build/src/test/regress/expected/xml.out 2022-01-12 05:24:02.795477001 +0000\n> +++ /tmp/cirrus-ci-build/src/test/regress/results/xml.out 2022-01-12 05:28:20.329086031 +0000\n> @@ -603,12 +603,12 @@\n> CREATE VIEW xmlview9 AS SELECT xmlserialize(content 'good' as text);\n> SELECT table_name, view_definition FROM information_schema.views\n> WHERE table_name LIKE 'xmlview%' ORDER BY 1;\n> - table_name | view_definition\n> -------------+-------------------------------------------------------------------------------------------------------------------\n> + table_name | view_definition\n> +------------+------------------------------------------------------------------------------------------------------------\n> xmlview1 | SELECT xmlcomment('test'::text) AS xmlcomment;\n> xmlview2 | SELECT XMLCONCAT('hello'::xml, 'you'::xml) AS \"xmlconcat\";\n> 
xmlview3 | SELECT XMLELEMENT(NAME element, XMLATTRIBUTES(1 AS \":one:\", 'deuce' AS two), 'content&') AS \"xmlelement\";\n> - xmlview4 | SELECT XMLELEMENT(NAME employee, XMLFOREST(emp.name AS name, emp.age AS age, emp.salary AS pay)) AS \"xmlelement\"+\n> + xmlview4 | SELECT XMLELEMENT(NAME employee, XMLFOREST(name AS name, age AS age, salary AS pay)) AS \"xmlelement\" +\n> | FROM emp;\n> xmlview5 | SELECT XMLPARSE(CONTENT '<abc>x</abc>'::text STRIP WHITESPACE) AS \"xmlparse\";\n> xmlview6 | SELECT XMLPI(NAME foo, 'bar'::text) AS \"xmlpi\";\n> diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/compression.out /tmp/cirrus-ci-build/src/test/regress/results/compression.out\n> --- /tmp/cirrus-ci-build/src/test/regress/expected/compression.out 2022-01-12 05:24:02.739471690 +0000\n> +++ /tmp/cirrus-ci-build/src/test/regress/results/compression.out 2022-01-12 05:28:23.537403929 +0000\n> @@ -187,7 +187,7 @@\n> --------+------+-----------+----------+---------+----------+-------------+--------------+-------------\n> x | text | | | | extended | | |\n> View definition:\n> - SELECT cmdata1.f1 AS x\n> + SELECT f1 AS x\n> FROM cmdata1;\n>\n> SELECT pg_column_compression(f1) FROM cmdata1;\n> @@ -274,7 +274,7 @@\n> --------+------+-----------+----------+---------+----------+-------------+--------------+-------------\n> x | text | | | | extended | lz4 | |\n> View definition:\n> - SELECT cmdata1.f1 AS x\n> + SELECT f1 AS x\n> FROM cmdata1;\n>\n> Could you send a rebased patch? 
In the meantime I'll switch the cf entry to\n> Waiting on Author.\n\nTurns out I had never compiled this patch set to exercise xml and lz4\ntests, whose output files contained view definitions shown using \\d\nthat also needed to be updated in the 0002 patch.\n\nFixed in the attached updated version.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Thu, 13 Jan 2022 15:39:12 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Thu, Jan 13, 2022 at 3:39 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Thu, Jan 13, 2022 at 12:10 PM Julien Rouhaud <rjuju123@gmail.com> wrote:\n> > On Mon, Dec 20, 2021 at 04:13:04PM +0900, Amit Langote wrote:\n> > > Patch 0002 needed a rebase, because a conflicting change to\n> > > expected/rules.out has since been committed.\n> >\n> > The cfbot reports new conflicts since about a week ago with this patch:\n> > Could you send a rebased patch? In the meantime I'll switch the cf entry to\n> > Waiting on Author.\n>\n> Turns out I had never compiled this patch set to exercise xml and lz4\n> tests, whose output files contained view definitions shown using \\d\n> that also needed to be updated in the 0002 patch.\n>\n> Fixed in the attached updated version.\n\ncfbot tells me it found a conflict when applying v7 on the latest\nHEAD. 
Fixed in the attached v8.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 17 Jan 2022 20:50:42 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Mon, Jan 17, 2022 at 3:51 AM Amit Langote <amitlangote09@gmail.com>\nwrote:\n\n> On Thu, Jan 13, 2022 at 3:39 PM Amit Langote <amitlangote09@gmail.com>\n> wrote:\n> > On Thu, Jan 13, 2022 at 12:10 PM Julien Rouhaud <rjuju123@gmail.com>\n> wrote:\n> > > On Mon, Dec 20, 2021 at 04:13:04PM +0900, Amit Langote wrote:\n> > > > Patch 0002 needed a rebase, because a conflicting change to\n> > > > expected/rules.out has since been committed.\n> > >\n> > > The cfbot reports new conflicts since about a week ago with this patch:\n> > > Could you send a rebased patch? In the meantime I'll switch the cf\n> entry to\n> > > Waiting on Author.\n> >\n> > Turns out I had never compiled this patch set to exercise xml and lz4\n> > tests, whose output files contained view definitions shown using \\d\n> > that also needed to be updated in the 0002 patch.\n> >\n> > Fixed in the attached updated version.\n>\n> cfbot tells me it found a conflict when applying v7 on the latest\n> HEAD. 
Fixed in the attached v8.\n>\n> Hi,\nFor patch 02, in the description:\n\npresent for locking views during execition\n\nTypo: execution.\n\n+ * to be used by the executor to lock the view relation and for the\n+ * planner to be able to record the view relation OID in the PlannedStmt\n+ * that it produces for the query.\n\nI think the sentence about executor can be placed after the sentence for\nthe planner.\n\nFor patch 01, GetRelPermissionInfo():\n\n+ return perminfo;\n+ }\n+ else\n\nkeyword 'else' is not needed - the else block can be left-indented.\n\nCheers\n\nOn Mon, Jan 17, 2022 at 3:51 AM Amit Langote <amitlangote09@gmail.com> wrote:On Thu, Jan 13, 2022 at 3:39 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Thu, Jan 13, 2022 at 12:10 PM Julien Rouhaud <rjuju123@gmail.com> wrote:\n> > On Mon, Dec 20, 2021 at 04:13:04PM +0900, Amit Langote wrote:\n> > > Patch 0002 needed a rebase, because a conflicting change to\n> > > expected/rules.out has since been committed.\n> >\n> > The cfbot reports new conflicts since about a week ago with this patch:\n> > Could you send a rebased patch?  In the meantime I'll switch the cf entry to\n> > Waiting on Author.\n>\n> Turns out I had never compiled this patch set to exercise xml and lz4\n> tests, whose output files contained view definitions shown using \\d\n> that also needed to be updated in the 0002 patch.\n>\n> Fixed in the attached updated version.\n\ncfbot tells me it found a conflict when applying v7 on the latest\nHEAD.  
Fixed in the attached v8.Hi,For patch 02, in the description:present for locking views during execition Typo: execution.+    * to be used by the executor to lock the view relation and for the+    * planner to be able to record the view relation OID in the PlannedStmt+    * that it produces for the query.I think the sentence about executor can be placed after the sentence for the planner.For patch 01, GetRelPermissionInfo():+       return perminfo;+   }+   elsekeyword 'else' is not needed - the else block can be left-indented.Cheers", "msg_date": "Mon, 17 Jan 2022 07:45:06 -0800", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Tue, Jan 18, 2022 at 12:42 AM Zhihong Yu <zyu@yugabyte.com> wrote:\n> Hi,\n> For patch 02, in the description:\n\nThanks for looking.\n\n> present for locking views during execition\n>\n> Typo: execution.\n>\n> + * to be used by the executor to lock the view relation and for the\n> + * planner to be able to record the view relation OID in the PlannedStmt\n> + * that it produces for the query.\n>\n> I think the sentence about executor can be placed after the sentence for the planner.\n\nFixed.\n\n> For patch 01, GetRelPermissionInfo():\n>\n> + return perminfo;\n> + }\n> + else\n>\n> keyword 'else' is not needed - the else block can be left-indented.\n\nOK, done.\n\nAlso needed fixes when rebasing.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 14 Mar 2022 16:36:53 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Mon, Mar 14, 2022 at 4:36 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> Also needed fixes when rebasing.\n\nNeeded another rebase.\n\nAs the changes being made with the patch are non-trivial and the patch\nhasn't been reviewed very significantly since Alvaro's 
comments back\nin Sept 2021 which I've since addressed, I'm thinking of pushing this\none into the version 16 dev cycle.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 23 Mar 2022 16:03:04 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On 2022-Mar-23, Amit Langote wrote:\n\n> As the changes being made with the patch are non-trivial and the patch\n> hasn't been reviewed very significantly since Alvaro's comments back\n> in Sept 2021 which I've since addressed, I'm thinking of pushing this\n> one into the version 16 dev cycle.\n\nLet's not get ahead of ourselves. The commitfest is not yet over.\n\n-- \nÁlvaro Herrera 48°01'N 7°57'E — https://www.EnterpriseDB.com/\n\"La virtud es el justo medio entre dos defectos\" (Aristóteles)\n\n\n", "msg_date": "Wed, 23 Mar 2022 10:49:53 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Mar 23, 2022 at 12:03 AM Amit Langote <amitlangote09@gmail.com>\nwrote:\n\n> On Mon, Mar 14, 2022 at 4:36 PM Amit Langote <amitlangote09@gmail.com>\n> wrote:\n> > Also needed fixes when rebasing.\n>\n> Needed another rebase.\n>\n> As the changes being made with the patch are non-trivial and the patch\n> hasn't been reviewed very significantly since Alvaro's comments back\n> in Sept 2021 which I've since addressed, I'm thinking of pushing this\n> one into the version 16 dev cycle.\n>\n> --\n> Amit Langote\n> EDB: http://www.enterprisedb.com\n\nHi,\nFor patch 1:\n\nbq. 
makes permissions-checking needlessly expensive when many inheritance\nchildren are added to the range range\n\n'range' is repeated in the above sentence.\n\n+ExecCheckOneRelPerms(RelPermissionInfo *perminfo)\n\nSince RelPermissionInfo is for one relation, I think the 'One' in func name\ncan be dropped.\n\n+ else /* this isn't a child result rel */\n+ resultRelInfo->ri_RootToChildMap = NULL;\n...\n+ resultRelInfo->ri_RootToChildMapValid = true;\n\nShould the assignment of true value be moved into the if block (in the else\nblock, ri_RootToChildMap is assigned NULL) ?\n\n+ /* Looks like the RTE doesn't, so try to find it the hard way. */\n\ndoesn't -> doesn't know\n\nCheers\n\nOn Wed, Mar 23, 2022 at 12:03 AM Amit Langote <amitlangote09@gmail.com> wrote:On Mon, Mar 14, 2022 at 4:36 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> Also needed fixes when rebasing.\n\nNeeded another rebase.\n\nAs the changes being made with the patch are non-trivial and the patch\nhasn't been reviewed very significantly since Alvaro's comments back\nin Sept 2021 which I've since addressed, I'm thinking of pushing this\none into the version 16 dev cycle.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.comHi,For patch 1:bq. makes permissions-checking needlessly expensive when many inheritance children are added to the range range 'range' is repeated in the above sentence.+ExecCheckOneRelPerms(RelPermissionInfo *perminfo)Since RelPermissionInfo is for one relation, I think the 'One' in func name can be dropped.+       else                    /* this isn't a child result rel */+           resultRelInfo->ri_RootToChildMap = NULL;...+       resultRelInfo->ri_RootToChildMapValid = true;Should the assignment of true value be moved into the if block (in the else block, ri_RootToChildMap is assigned NULL) ?+   /* Looks like the RTE doesn't, so try to find it the hard way. 
*/doesn't -> doesn't knowCheers", "msg_date": "Wed, 23 Mar 2022 14:02:10 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, 23 Mar 2022 at 20:03, Amit Langote <amitlangote09@gmail.com> wrote:\n>\n> On Mon, Mar 14, 2022 at 4:36 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > Also needed fixes when rebasing.\n>\n> Needed another rebase.\n\nI had a look at the v10-0001 patch. I agree that it seems to be a good\nidea to separate out the required permission checks rather than having\nthe Bitmapset to index the interesting range table entries.\n\nOne thing that I could just not determine from looking at the patch\nwas if there's meant to be just 1 RelPermissionInfo per RTE or per rel\nOid. None of the comments helped me understand this and\nMergeRelPermissionInfos() seems to exist that appears to try and\nuniquify this to some extent. I just see no code that does this\nprocess for a single query level. I've provided more detail on this in\n#5 below.\n\nHere's my complete review of v10-0001:\n\n1. ExecCheckPermisssions -> ExecCheckPermissions\n\n2. I think you'll want to move the userid field away from below a\ncomment that claims the following fields are for foreign tables only.\n\n /* Information about foreign tables and foreign joins */\n Oid serverid; /* identifies server for the table or join */\n- Oid userid; /* identifies user to check access as */\n+ Oid userid; /* identifies user to check access as; set\n+ * in non-foreign table relations too! */\n\n3. This should use READ_OID_FIELD()\n\nREAD_INT_FIELD(checkAsUser);\n\nand this one:\n\nREAD_INT_FIELD(relid);\n\n4. This looks wrong:\n\n- rel->userid = rte->checkAsUser;\n+ if (rte->rtekind == RTE_RELATION)\n+ {\n+ /* otherrels use the root parent's value. */\n+ rel->userid = parent ? 
parent->userid :\n+ GetRelPermissionInfo(root->parse->relpermlist,\n+ rte, false)->checkAsUser;\n+ }\n\nIf 'parent' is false then you'll assign the result of\nGetRelPermissionInfo (a RelPermissionInfo *) to an Oid.\n\n5. I'm not sure if there's a case that can break this one, but I'm not\nvery confident that there's not one:\n\nI'm not sure I agree with how you've coded GetRelPermissionInfo().\nYou're searching for a RelPermissionInfo based on the table's Oid. If\nyou have multiple RelPermissionInfos for the same Oid then this will\njust find the first one and return it, but that might not be the one\nfor the RangeTblEntry in question.\n\nHere's an example of the sort of thing that could have problems with this:\n\npostgres=# create role bob;\nCREATE ROLE\npostgres=# create table ab (a int, b int);\nCREATE TABLE\npostgres=# grant all (a) on table ab to bob;\nGRANT\npostgres=# set session authorization bob;\nSET\npostgres=> update ab set a = (select b from ab);\nERROR: permission denied for table ab\n\nThe patch does correctly ERROR out here on permission failure, but as\nfar as I can see, that's just due to the fact that we're checking the\npermissions of all items in the PlannedStmt.relpermlist List. If\nthere had been code that had tried to find the RelPermissionInfo based\non the relation's Oid then we'd have accidentally found that we only\nneed an UPDATE permission on (a). 
SELECT on (b) wouldn't have been\nchecked.\n\nAs far as I can see, to fix that you'd either need to store the RTI of\nthe RelPermissionInfo and lookup based on that, or you'd have to\nbms_union() all the columns and bitwise OR the required permissions\nand ensure you only have 1 RelPermissionInfo per Oid.\n\nThe fact that I have two entries when I debug InitPlan() seems to\ndisagree with what the comment in AddRelPermissionInfo() is claiming\nshould happen:\n\n/*\n* To prevent duplicate entries for a given relation, check if already in\n* the list.\n*/\n\nI'm not clear on if the list is meant to be unique on Oid or not.\n\n6. acesss?\n\n- * Set flags and access permissions.\n+ * Set flags and initialize acesss permissions.\n\n7. I was expecting to see an |= here:\n\n+ /* \"merge\" proprties. */\n+ dest_perminfo->inh = src_perminfo->inh;\n\nWhy is a plain assignment ok?\n\n8. Some indentation problems here:\n\n@@ -3170,6 +3148,8 @@ rewriteTargetView(Query *parsetree, Relation view)\n\n base_rt_index = rtr->rtindex;\n base_rte = rt_fetch(base_rt_index, viewquery->rtable);\n+base_perminfo = GetRelPermissionInfo(viewquery->relpermlist, base_rte,\n+ false);\n\n9. You can use foreach_current_index(lc) + 1 in:\n\n+ i = 0;\n+ foreach(lc, relpermlist)\n+ {\n+ perminfo = (RelPermissionInfo *) lfirst(lc);\n+ if (perminfo->relid == rte->relid)\n+ {\n+ /* And set the index in RTE. */\n+ rte->perminfoindex = i + 1;\n+ return perminfo;\n+ }\n+ i++;\n+ }\n\n10. I think the double quote is not in the correct place in this comment:\n\n List *finalrtable; /* \"flat\" rangetable for executor */\n\n+ List *finalrelpermlist; /* \"flat list of RelPermissionInfo \"*/\n\n\n11. Looks like an accidental change:\n\n+++ b/src/include/optimizer/planner.h\n@@ -58,4 +58,5 @@ extern Path *get_cheapest_fractional_path(RelOptInfo *rel,\n\n extern Expr *preprocess_phv_expression(PlannerInfo *root, Expr *expr);\n\n+\n\n12. 
These need to be broken into multiple lines:\n\n+extern RelPermissionInfo *AddRelPermissionInfo(List **relpermlist,\nRangeTblEntry *rte);\n+extern void MergeRelPermissionInfos(Query *dest_query, List *src_relpermlist);\n+extern RelPermissionInfo *GetRelPermissionInfo(List *relpermlist,\nRangeTblEntry *rte, bool missing_ok);\n\nDavid\n\n\n", "msg_date": "Fri, 25 Mar 2022 08:46:09 +1300", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Mar 25, 2022 at 4:46 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> I had a look at the v10-0001 patch. I agree that it seems to be a good\n> idea to separate out the required permission checks rather than having\n> the Bitmapset to index the interesting range table entries.\n\nThanks David for taking a look at this.\n\n> One thing that I could just not determine from looking at the patch\n> was if there's meant to be just 1 RelPermissionInfo per RTE or per rel\n> Oid.\n\nIt's the latter.\n\n> None of the comments helped me understand this\n\nI agree that the comment above the RelPermissionInfo struct definition\nmissed mentioning this really important bit. I've tried fixing that\nas:\n\n@@ -1142,7 +1142,9 @@ typedef struct RangeTblEntry\n * Per-relation information for permission checking. Added to the query\n * by the parser when populating the query range table and subsequently\n * editorialized on by the rewriter and the planner. There is an entry\n- * each for all RTE_RELATION entries present in the range table.\n+ * each for all RTE_RELATION entries present in the range table, though\n+ * different RTEs for the same relation share the\nRelPermissionInfo, that\n+ * is, there is only one RelPermissionInfo containing a given relid.\n\n> and\n> MergeRelPermissionInfos() seems to exist that appears to try and\n> uniquify this to some extent. I just see no code that does this\n> process for a single query level. 
I've provided more detail on this in\n> #5 below.\n>\n> Here's my complete review of v10-0001:\n>\n> 1. ExecCheckPermisssions -> ExecCheckPermissions\n\nFixed.\n\n> 2. I think you'll want to move the userid field away from below a\n> comment that claims the following fields are for foreign tables only.\n>\n> /* Information about foreign tables and foreign joins */\n> Oid serverid; /* identifies server for the table or join */\n> - Oid userid; /* identifies user to check access as */\n> + Oid userid; /* identifies user to check access as; set\n> + * in non-foreign table relations too! */\n\nActually, I realized that the comment should not have been touched to\nbegin with. Reverted.\n\n> 3. This should use READ_OID_FIELD()\n>\n> READ_INT_FIELD(checkAsUser);\n>\n> and this one:\n>\n> READ_INT_FIELD(relid);\n\nFixed.\n\n> 4. This looks wrong:\n>\n> - rel->userid = rte->checkAsUser;\n> + if (rte->rtekind == RTE_RELATION)\n> + {\n> + /* otherrels use the root parent's value. */\n> + rel->userid = parent ? parent->userid :\n> + GetRelPermissionInfo(root->parse->relpermlist,\n> + rte, false)->checkAsUser;\n> + }\n>\n> If 'parent' is false then you'll assign the result of\n> GetRelPermissionInfo (a RelPermissionInfo *) to an Oid.\n\nHmm, I don't see a problem, because what's being assigned is\n`GetRelPermissionInfo(...)->checkAsUser`.\n\nAnyway, I rewrote the block more verbosely as:\n\n if (rte->rtekind == RTE_RELATION)\n {\n- /* otherrels use the root parent's value. */\n- rel->userid = parent ? 
parent->userid :\n- GetRelPermissionInfo(root->parse->relpermlist,\n- rte, false)->checkAsUser;\n+ /*\n+ * Get the userid from the relation's RelPermissionInfo, though\n+ * only the tables mentioned in query are assigned RelPermissionInfos.\n+ * Child relations (otherrels) simply use the parent's value.\n+ */\n+ if (parent == NULL)\n+ {\n+ RelPermissionInfo *perminfo =\n+ GetRelPermissionInfo(root->parse->relpermlist, rte, false);\n+\n+ rel->userid = perminfo->checkAsUser;\n+ }\n+ else\n+ rel->userid = parent->userid;\n }\n+ else\n+ rel->userid = InvalidOid;\n\n> 5. I'm not sure if there's a case that can break this one, but I'm not\n> very confident that there's not one:\n>\n> I'm not sure I agree with how you've coded GetRelPermissionInfo().\n> You're searching for a RelPermissionInfo based on the table's Oid. If\n> you have multiple RelPermissionInfos for the same Oid then this will\n> just find the first one and return it, but that might not be the one\n> for the RangeTblEntry in question.\n>\n> Here's an example of the sort of thing that could have problems with this:\n>\n> postgres=# create role bob;\n> CREATE ROLE\n> postgres=# create table ab (a int, b int);\n> CREATE TABLE\n> postgres=# grant all (a) on table ab to bob;\n> GRANT\n> postgres=# set session authorization bob;\n> SET\n> postgres=> update ab set a = (select b from ab);\n> ERROR: permission denied for table ab\n>\n> The patch does correctly ERROR out here on permission failure, but as\n> far as I can see, that's just due to the fact that we're checking the\n> permissions of all items in the PlannedStmt.relpermlist List. If\n> there had been code that had tried to find the RelPermissionInfo based\n> on the relation's Oid then we'd have accidentally found that we only\n> need an UPDATE permission on (a). 
SELECT on (b) wouldn't have been\n> checked.\n>\n> As far as I can see, to fix that you'd either need to store the RTI of\n> the RelPermissionInfo and lookup based on that, or you'd have to\n> bms_union() all the columns and bitwise OR the required permissions\n> and ensure you only have 1 RelPermissionInfo per Oid.\n>\n> The fact that I have two entries when I debug InitPlan() seems to\n> disagree with what the comment in AddRelPermissionInfo() is claiming\n> should happen:\n>\n> /*\n> * To prevent duplicate entries for a given relation, check if already in\n> * the list.\n> */\n>\n> I'm not clear on if the list is meant to be unique on Oid or not.\n\nYeah, it is, but it seems that the code I added in\nadd_rtes_to_flat_rtable() to accumulate various subplans' relpermlists\ninto finalrelpermlist didn't actually bother to unique'ify the list.\nIt used list_concat() to combine finalrelpermlist and a given\nsubplan's relpermlist, instead of MergeRelPemissionInfos to merge the\ntwo lists.\n\nI've fixed that in the attached and can now see that the plan for the\nquery in your example ends up with just one RelPermissionInfo which\ncombines the permissions and column bitmapsets for all operations.\n\n> 6. acesss?\n>\n> - * Set flags and access permissions.\n> + * Set flags and initialize acesss permissions.\n>\n> 7. I was expecting to see an |= here:\n>\n> + /* \"merge\" proprties. */\n> + dest_perminfo->inh = src_perminfo->inh;\n>\n> Why is a plain assignment ok?\n\nYou're perhaps right that |= is correct. I forget the details but I\nthink I added 'inh' field to RelPemissionInfo to get some tests in\nsepgsql contrib module to pass and those tests apparently didn't mind\nthe current coding.\n\n> 8. 
Some indentation problems here:\n>\n> @@ -3170,6 +3148,8 @@ rewriteTargetView(Query *parsetree, Relation view)\n>\n> base_rt_index = rtr->rtindex;\n> base_rte = rt_fetch(base_rt_index, viewquery->rtable);\n> +base_perminfo = GetRelPermissionInfo(viewquery->relpermlist, base_rte,\n> + false);\n\nFixed.\n\n>\n> 9. You can use foreach_current_index(lc) + 1 in:\n>\n> + i = 0;\n> + foreach(lc, relpermlist)\n> + {\n> + perminfo = (RelPermissionInfo *) lfirst(lc);\n> + if (perminfo->relid == rte->relid)\n> + {\n> + /* And set the index in RTE. */\n> + rte->perminfoindex = i + 1;\n> + return perminfo;\n> + }\n> + i++;\n> + }\n\nOh, nice tip. Done.\n\n> 10. I think the double quote is not in the correct place in this comment:\n>\n> List *finalrtable; /* \"flat\" rangetable for executor */\n>\n> + List *finalrelpermlist; /* \"flat list of RelPermissionInfo \"*/\n>\n>\n> 11. Looks like an accidental change:\n>\n> +++ b/src/include/optimizer/planner.h\n> @@ -58,4 +58,5 @@ extern Path *get_cheapest_fractional_path(RelOptInfo *rel,\n>\n> extern Expr *preprocess_phv_expression(PlannerInfo *root, Expr *expr);\n>\n> +\n>\n> 12. These need to be broken into multiple lines:\n>\n> +extern RelPermissionInfo *AddRelPermissionInfo(List **relpermlist,\n> RangeTblEntry *rte);\n> +extern void MergeRelPermissionInfos(Query *dest_query, List *src_relpermlist);\n> +extern RelPermissionInfo *GetRelPermissionInfo(List *relpermlist,\n> RangeTblEntry *rte, bool missing_ok);\n\nAll fixed.\n\nv11 attached.\n\n--\nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Thu, 31 Mar 2022 12:16:02 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "This is failing regression tests. I don't understand how this patch\ncould be affecting this test though. 
Perhaps it's a problem with the\njson patches that were committed recently -- but they don't seem to be\ncausing other patches to fail.\n\n\ndiff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/jsonb_sqljson.out\n/tmp/cirrus-ci-build/src/test/regress/results/jsonb_sqljson.out\n--- /tmp/cirrus-ci-build/src/test/regress/expected/jsonb_sqljson.out\n2022-04-05 12:15:40.590168291 +0000\n+++ /tmp/cirrus-ci-build/src/test/regress/results/jsonb_sqljson.out\n2022-04-05 12:20:17.338045137 +0000\n@@ -1159,37 +1159,37 @@\n );\n \\sv jsonb_table_view\n CREATE OR REPLACE VIEW public.jsonb_table_view AS\n- SELECT \"json_table\".id,\n- \"json_table\".id2,\n- \"json_table\".\"int\",\n- \"json_table\".text,\n- \"json_table\".\"char(4)\",\n- \"json_table\".bool,\n- \"json_table\".\"numeric\",\n- \"json_table\".domain,\n- \"json_table\".js,\n- \"json_table\".jb,\n- \"json_table\".jst,\n- \"json_table\".jsc,\n- \"json_table\".jsv,\n- \"json_table\".jsb,\n- \"json_table\".jsbq,\n- \"json_table\".aaa,\n- \"json_table\".aaa1,\n- \"json_table\".exists1,\n- \"json_table\".exists2,\n- \"json_table\".exists3,\n- \"json_table\".js2,\n- \"json_table\".jsb2w,\n- \"json_table\".jsb2q,\n- \"json_table\".ia,\n- \"json_table\".ta,\n- \"json_table\".jba,\n- \"json_table\".a1,\n- \"json_table\".b1,\n- \"json_table\".a11,\n- \"json_table\".a21,\n- \"json_table\".a22\n+ SELECT id,\n+ id2,\n+ \"int\",\n+ text,\n+ \"char(4)\",\n+ bool,\n+ \"numeric\",\n+ domain,\n+ js,\n+ jb,\n+ jst,\n+ jsc,\n+ jsv,\n+ jsb,\n+ jsbq,\n+ aaa,\n+ aaa1,\n+ exists1,\n+ exists2,\n+ exists3,\n+ js2,\n+ jsb2w,\n+ jsb2q,\n+ ia,\n+ ta,\n+ jba,\n+ a1,\n+ b1,\n+ a11,\n+ a21,\n+ a22\n FROM JSON_TABLE(\n 'null'::jsonb, '$[*]'\n PASSING\n\n\nOn Wed, 30 Mar 2022 at 23:16, Amit Langote <amitlangote09@gmail.com> wrote:\n>\n> On Fri, Mar 25, 2022 at 4:46 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> > I had a look at the v10-0001 patch. 
I agree that it seems to be a good\n> > idea to separate out the required permission checks rather than having\n> > the Bitmapset to index the interesting range table entries.\n>\n> Thanks David for taking a look at this.\n>\n> > One thing that I could just not determine from looking at the patch\n> > was if there's meant to be just 1 RelPermissionInfo per RTE or per rel\n> > Oid.\n>\n> It's the latter.\n>\n> > None of the comments helped me understand this\n>\n> I agree that the comment above the RelPermissionInfo struct definition\n> missed mentioning this really important bit. I've tried fixing that\n> as:\n>\n> @@ -1142,7 +1142,9 @@ typedef struct RangeTblEntry\n> * Per-relation information for permission checking. Added to the query\n> * by the parser when populating the query range table and subsequently\n> * editorialized on by the rewriter and the planner. There is an entry\n> - * each for all RTE_RELATION entries present in the range table.\n> + * each for all RTE_RELATION entries present in the range table, though\n> + * different RTEs for the same relation share the\n> RelPermissionInfo, that\n> + * is, there is only one RelPermissionInfo containing a given relid.\n>\n> > and\n> > MergeRelPermissionInfos() seems to exist that appears to try and\n> > uniquify this to some extent. I just see no code that does this\n> > process for a single query level. I've provided more detail on this in\n> > #5 below.\n> >\n> > Here's my complete review of v10-0001:\n> >\n> > 1. ExecCheckPermisssions -> ExecCheckPermissions\n>\n> Fixed.\n>\n> > 2. 
I think you'll want to move the userid field away from below a\n> > comment that claims the following fields are for foreign tables only.\n> >\n> > /* Information about foreign tables and foreign joins */\n> > Oid serverid; /* identifies server for the table or join */\n> > - Oid userid; /* identifies user to check access as */\n> > + Oid userid; /* identifies user to check access as; set\n> > + * in non-foreign table relations too! */\n>\n> Actually, I realized that the comment should not have been touched to\n> begin with. Reverted.\n>\n> > 3. This should use READ_OID_FIELD()\n> >\n> > READ_INT_FIELD(checkAsUser);\n> >\n> > and this one:\n> >\n> > READ_INT_FIELD(relid);\n>\n> Fixed.\n>\n> > 4. This looks wrong:\n> >\n> > - rel->userid = rte->checkAsUser;\n> > + if (rte->rtekind == RTE_RELATION)\n> > + {\n> > + /* otherrels use the root parent's value. */\n> > + rel->userid = parent ? parent->userid :\n> > + GetRelPermissionInfo(root->parse->relpermlist,\n> > + rte, false)->checkAsUser;\n> > + }\n> >\n> > If 'parent' is false then you'll assign the result of\n> > GetRelPermissionInfo (a RelPermissionInfo *) to an Oid.\n>\n> Hmm, I don't see a problem, because what's being assigned is\n> `GetRelPermissionInfo(...)->checkAsUser`.\n>\n> Anyway, I rewrote the block more verbosely as:\n>\n> if (rte->rtekind == RTE_RELATION)\n> {\n> - /* otherrels use the root parent's value. */\n> - rel->userid = parent ? 
parent->userid :\n> - GetRelPermissionInfo(root->parse->relpermlist,\n> - rte, false)->checkAsUser;\n> + /*\n> + * Get the userid from the relation's RelPermissionInfo, though\n> + * only the tables mentioned in query are assigned RelPermissionInfos.\n> + * Child relations (otherrels) simply use the parent's value.\n> + */\n> + if (parent == NULL)\n> + {\n> + RelPermissionInfo *perminfo =\n> + GetRelPermissionInfo(root->parse->relpermlist, rte, false);\n> +\n> + rel->userid = perminfo->checkAsUser;\n> + }\n> + else\n> + rel->userid = parent->userid;\n> }\n> + else\n> + rel->userid = InvalidOid;\n>\n> > 5. I'm not sure if there's a case that can break this one, but I'm not\n> > very confident that there's not one:\n> >\n> > I'm not sure I agree with how you've coded GetRelPermissionInfo().\n> > You're searching for a RelPermissionInfo based on the table's Oid. If\n> > you have multiple RelPermissionInfos for the same Oid then this will\n> > just find the first one and return it, but that might not be the one\n> > for the RangeTblEntry in question.\n> >\n> > Here's an example of the sort of thing that could have problems with this:\n> >\n> > postgres=# create role bob;\n> > CREATE ROLE\n> > postgres=# create table ab (a int, b int);\n> > CREATE TABLE\n> > postgres=# grant all (a) on table ab to bob;\n> > GRANT\n> > postgres=# set session authorization bob;\n> > SET\n> > postgres=> update ab set a = (select b from ab);\n> > ERROR: permission denied for table ab\n> >\n> > The patch does correctly ERROR out here on permission failure, but as\n> > far as I can see, that's just due to the fact that we're checking the\n> > permissions of all items in the PlannedStmt.relpermlist List. If\n> > there had been code that had tried to find the RelPermissionInfo based\n> > on the relation's Oid then we'd have accidentally found that we only\n> > need an UPDATE permission on (a). 
SELECT on (b) wouldn't have been\n> > checked.\n> >\n> > As far as I can see, to fix that you'd either need to store the RTI of\n> > the RelPermissionInfo and lookup based on that, or you'd have to\n> > bms_union() all the columns and bitwise OR the required permissions\n> > and ensure you only have 1 RelPermissionInfo per Oid.\n> >\n> > The fact that I have two entries when I debug InitPlan() seems to\n> > disagree with what the comment in AddRelPermissionInfo() is claiming\n> > should happen:\n> >\n> > /*\n> > * To prevent duplicate entries for a given relation, check if already in\n> > * the list.\n> > */\n> >\n> > I'm not clear on if the list is meant to be unique on Oid or not.\n>\n> Yeah, it is, but it seems that the code I added in\n> add_rtes_to_flat_rtable() to accumulate various subplans' relpermlists\n> into finalrelpermlist didn't actually bother to unique'ify the list.\n> It used list_concat() to combine finalrelpermlist and a given\n> subplan's relpermlist, instead of MergeRelPemissionInfos to merge the\n> two lists.\n>\n> I've fixed that in the attached and can now see that the plan for the\n> query in your example ends up with just one RelPermissionInfo which\n> combines the permissions and column bitmapsets for all operations.\n>\n> > 6. acesss?\n> >\n> > - * Set flags and access permissions.\n> > + * Set flags and initialize acesss permissions.\n> >\n> > 7. I was expecting to see an |= here:\n> >\n> > + /* \"merge\" proprties. */\n> > + dest_perminfo->inh = src_perminfo->inh;\n> >\n> > Why is a plain assignment ok?\n>\n> You're perhaps right that |= is correct. I forget the details but I\n> think I added 'inh' field to RelPemissionInfo to get some tests in\n> sepgsql contrib module to pass and those tests apparently didn't mind\n> the current coding.\n>\n> > 8. 
Some indentation problems here:\n> >\n> > @@ -3170,6 +3148,8 @@ rewriteTargetView(Query *parsetree, Relation view)\n> >\n> > base_rt_index = rtr->rtindex;\n> > base_rte = rt_fetch(base_rt_index, viewquery->rtable);\n> > +base_perminfo = GetRelPermissionInfo(viewquery->relpermlist, base_rte,\n> > + false);\n>\n> Fixed.\n>\n> >\n> > 9. You can use foreach_current_index(lc) + 1 in:\n> >\n> > + i = 0;\n> > + foreach(lc, relpermlist)\n> > + {\n> > + perminfo = (RelPermissionInfo *) lfirst(lc);\n> > + if (perminfo->relid == rte->relid)\n> > + {\n> > + /* And set the index in RTE. */\n> > + rte->perminfoindex = i + 1;\n> > + return perminfo;\n> > + }\n> > + i++;\n> > + }\n>\n> Oh, nice tip. Done.\n>\n> > 10. I think the double quote is not in the correct place in this comment:\n> >\n> > List *finalrtable; /* \"flat\" rangetable for executor */\n> >\n> > + List *finalrelpermlist; /* \"flat list of RelPermissionInfo \"*/\n> >\n> >\n> > 11. Looks like an accidental change:\n> >\n> > +++ b/src/include/optimizer/planner.h\n> > @@ -58,4 +58,5 @@ extern Path *get_cheapest_fractional_path(RelOptInfo *rel,\n> >\n> > extern Expr *preprocess_phv_expression(PlannerInfo *root, Expr *expr);\n> >\n> > +\n> >\n> > 12. These need to be broken into multiple lines:\n> >\n> > +extern RelPermissionInfo *AddRelPermissionInfo(List **relpermlist,\n> > RangeTblEntry *rte);\n> > +extern void MergeRelPermissionInfos(Query *dest_query, List *src_relpermlist);\n> > +extern RelPermissionInfo *GetRelPermissionInfo(List *relpermlist,\n> > RangeTblEntry *rte, bool missing_ok);\n>\n> All fixed.\n>\n> v11 attached.\n>\n> --\n> Amit Langote\n> EDB: http://www.enterprisedb.com\n\n\n\n--\ngreg\n\n\n", "msg_date": "Tue, 5 Apr 2022 10:26:58 -0400", "msg_from": "Greg Stark <stark@mit.edu>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, 6 Apr 2022 at 02:27, Greg Stark <stark@mit.edu> wrote:\n>\n> This is failing regression tests. 
I don't understand how this patch\n> could be affecting this test though. Perhaps it's a problem with the\n> json patches that were committed recently -- but they don't seem to be\n> causing other patches to fail.\n\nI think this will just be related to the useprefix =\nlist_length(es->rtable) > 1; in show_plan_tlist(). There's likely not\nmuch point in keeping the RTE for the view anymore. IIRC it was just\nthere to check permissions. Amit has now added another way of doing\nthose.\n\nDavid\n\n\n", "msg_date": "Wed, 6 Apr 2022 08:21:40 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Apr 6, 2022 at 5:22 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> On Wed, 6 Apr 2022 at 02:27, Greg Stark <stark@mit.edu> wrote:\n> >\n> > This is failing regression tests. I don't understand how this patch\n> > could be affecting this test though. Perhaps it's a problem with the\n> > json patches that were committed recently -- but they don't seem to be\n> > causing other patches to fail.\n>\n> I think this will just be related to the useprefix =\n> list_length(es->rtable) > 1; in show_plan_tlist(). There's likely not\n> much point in keeping the RTE for the view anymore. IIRC it was just\n> there to check permissions. 
Amit has now added another way of doing\n> those.\n\nThat is correct.\n\nI have rebased the patch and updated expected output of the failing test.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 6 Apr 2022 10:18:48 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Rebased to keep the cfbot green for now.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 11 Apr 2022 14:41:16 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Mon, Apr 11, 2022 at 2:41 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> Rebased to keep the cfbot green for now.\n\nAnd again to fix the rules.out conflicts.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 6 Jul 2022 12:25:42 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Rebased over 964d01ae90.\n\n\n--\nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 13 Jul 2022 17:00:29 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Jul 13, 2022 at 5:00 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> Rebased over 964d01ae90.\n\nRebased over 2d04277121f.\n\n\n--\nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 27 Jul 2022 12:14:10 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Amit Langote <amitlangote09@gmail.com> writes:\n> [ v16 patches ]\n\nI took a quick look at this ...\n\nI 
think that the notion behind MergeRelPermissionInfos, ie that\na single RelPermissionInfo can represent *all* the checks for\na given table OID, is fundamentally wrong. For example, when\nmerging a view into an outer query that references a table\nalso used by the view, the checkAsUser fields might be different,\nand the permissions to check might be different, and the columns\nthose permissions need to hold for might be different. Blindly\nbms_union'ing the column sets will lead to requiring far more\npermissions than the query should require. Conversely, this\napproach could lead to allowing cases we should reject, if you\nhappen to \"merge\" checkAsUser in a way that ends in checking as a\nhigher-privilege user than should be checked.\n\nI'm inclined to think that you should abandon the idea of\nmerging RelPermissionInfos at all. It can only buy us much\nin the case of self-joins, which ought to be rare. It'd\nbe better to just say \"there is one RelPermissionInfo for\neach RTE requiring any sort of permissions check\". Either\nthat or you need to complicate RelPermissionInfo a lot, but\nI don't see the advantage.\n\nIt'd likely be better to rename ExecutorCheckPerms_hook,\nsay to ExecCheckPermissions_hook given the rename of\nExecCheckRTPerms. As it stands, it *looks* like the API\nof that hook has not changed, when it has. Better to\nbreak calling code visibly than to make people debug their\nway to an understanding that the List contents are no longer\nwhat they expected. A different idea could be to pass both\nthe rangetable and relpermlist, again making the API break obvious\n(and who's to say a hook might not still want the rangetable?)\n\nIn parsenodes.h:\n+ List *relpermlist; /* list of RTEPermissionInfo nodes for\n+ * the RTE_RELATION entries in rtable */\n\nI find this comment not very future-proof, if indeed it's strictly\ncorrect even today. Maybe better \"list of RelPermissionInfo nodes for\nrangetable entries having perminfoindex > 0\". 
Likewise for the comment\nin RangeTableEntry: there's no compelling reason to assume that all and\nonly RELATION RTEs will have RelPermissionInfo. Even if that remains\ntrue at parse time it's falsified during planning.\n\nAlso note typo in node name: that comment is the only reference to\n\"RTEPermissionInfo\" AFAICS. Although, given the redefinition I\nsuggest above, arguably \"RTEPermissionInfo\" is the better name?\n\nI'm confused as to why RelPermissionInfo.inh exists. It doesn't\nseem to me that permissions checking should care about child rels.\n\nWhy did you add checkAsUser to ForeignScan (and not any other scan\nplan nodes)? At best that's pretty asymmetric, but it seems mighty\nbogus: under what circumstances would an FDW need to know that but\nnot any of the other RelPermissionInfo fields? This seems to\nindicate that someplace we should work harder at making the\nRelPermissionInfo list available to FDWs. (CustomScan providers\nmight have similar issues, btw.)\n\nI've not looked at much of the actual code, just the .h file changes.\nHaven't studied 0002 either.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 27 Jul 2022 17:04:15 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "... One more thing: maybe we should rethink where to put\nextraUpdatedCols. Between the facts that it's not used for\nactual permissions checks, and that it's calculated by the\nrewriter not parser, it doesn't seem like it really belongs\nin RelPermissionInfo. Should we keep it in RangeTblEntry?\nShould it go somewhere else entirely? 
I'm just speculating,\nbut now is a good time to think about it.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 27 Jul 2022 17:18:38 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Thu, Jul 28, 2022 at 6:04 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Amit Langote <amitlangote09@gmail.com> writes:\n> > [ v16 patches ]\n>\n> I took a quick look at this ...\n\nThanks for the review and sorry about the delay.\n\n> I think that the notion behind MergeRelPermissionInfos, ie that\n> a single RelPermissionInfo can represent *all* the checks for\n> a given table OID, is fundamentally wrong. For example, when\n> merging a view into an outer query that references a table\n> also used by the view, the checkAsUser fields might be different,\n> and the permissions to check might be different, and the columns\n> those permissions need to hold for might be different. Blindly\n> bms_union'ing the column sets will lead to requiring far more\n> permissions than the query should require. Conversely, this\n> approach could lead to allowing cases we should reject, if you\n> happen to \"merge\" checkAsUser in a way that ends in checking as a\n> higher-privilege user than should be checked.\n>\n> I'm inclined to think that you should abandon the idea of\n> merging RelPermissionInfos at all. It can only buy us much\n> in the case of self-joins, which ought to be rare. It'd\n> be better to just say \"there is one RelPermissionInfo for\n> each RTE requiring any sort of permissions check\". 
Either\n> that or you need to complicate RelPermissionInfo a lot, but\n> I don't see the advantage.\n\nOK, I agree that the complexity of sharing a RelPermissionInfo between\nRTEs far exceeds any performance benefit to be had from it.\n\nI have changed things so that there's one RelPermissionInfo for every\nRTE_RELATION entry in the range table, except those that the planner\nadds when expanding inheritance.\n\n> It'd likely be better to rename ExecutorCheckPerms_hook,\n> say to ExecCheckPermissions_hook given the rename of\n> ExecCheckRTPerms. As it stands, it *looks* like the API\n> of that hook has not changed, when it has. Better to\n> break calling code visibly than to make people debug their\n> way to an understanding that the List contents are no longer\n> what they expected. A different idea could be to pass both\n> the rangetable and relpermlist, again making the API break obvious\n> (and who's to say a hook might not still want the rangetable?)\n\nI agree it'd be better to break the API more explicitly. Actually, I\ndecided to adopt both of these suggestions: renamed the hook and kept\nthe rangeTable parameter.\n\n> In parsenodes.h:\n> + List *relpermlist; /* list of RTEPermissionInfo nodes for\n> + * the RTE_RELATION entries in rtable */\n>\n> I find this comment not very future-proof, if indeed it's strictly\n> correct even today. Maybe better \"list of RelPermissionInfo nodes for\n> rangetable entries having perminfoindex > 0\". Likewise for the comment\n> in RangeTableEntry: there's no compelling reason to assume that all and\n> only RELATION RTEs will have RelPermissionInfo. Even if that remains\n> true at parse time it's falsified during planning.\n\nAh right, inheritance children's RTE_RELATION entries don't have one.\nI've fixed the comment.\n\n> Also note typo in node name: that comment is the only reference to\n> \"RTEPermissionInfo\" AFAICS. 
Although, given the redefinition I\n> suggest above, arguably \"RTEPermissionInfo\" is the better name?\n\nAgreed. I've renamed RelPermissionInfo to RTEPermissionInfo and\nrelpermlist to rtepermlist.\n\n> I'm confused as to why RelPermissionInfo.inh exists. It doesn't\n> seem to me that permissions checking should care about child rels.\n\nI had to do this for contrib/sepgsql, sepgsql_dml_privileges() has this:\n\n /*\n * If this RangeTblEntry is also supposed to reference inherited\n * tables, we need to check security label of the child tables. So, we\n * expand rte->relid into list of OIDs of inheritance hierarchy, then\n * checker routine will be invoked for each relations.\n */\n if (!rte->inh)\n tableIds = list_make1_oid(rte->relid);\n else\n tableIds = find_all_inheritors(rte->relid, NoLock, NULL);\n\n> Why did you add checkAsUser to ForeignScan (and not any other scan\n> plan nodes)? At best that's pretty asymmetric, but it seems mighty\n> bogus: under what circumstances would an FDW need to know that but\n> not any of the other RelPermissionInfo fields? This seems to\n> indicate that someplace we should work harder at making the\n> RelPermissionInfo list available to FDWs. (CustomScan providers\n> might have similar issues, btw.)\n\nI think I had tried doing what you are suggesting -- getting the\ncheckAsUser from a RelPermissionInfo rather than putting that in\nForeignScan -- though we can't do it, because we need the userid for\nchild foreign table relations, for which we don't create a\nRelPermissionInfo. 
ForeignScan nodes for child relations don't store\ntheir root parent's RT index, so we can't get the checkAsUser using\nthe root parent's RelPermissionInfo, like I could do for child foreign\ntable \"result\" relations using ResultRelInfo.ri_RootResultRelInfo.\n\nAs to why an FDW may not need to know any of the other\nRelPermissionInfo fields, IIUC, ExecCheckPermissions() would have done\neverything that ought to be done *locally* using that information.\nWhatever the remote side needs to know wrt access permission checking\nshould have been put in fdw_private, no?\n\nOn Thu, Jul 28, 2022 at 6:18 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> ... One more thing: maybe we should rethink where to put\n> extraUpdatedCols. Between the facts that it's not used for\n> actual permissions checks, and that it's calculated by the\n> rewriter not parser, it doesn't seem like it really belongs\n> in RelPermissionInfo. Should we keep it in RangeTblEntry?\n> Should it go somewhere else entirely? I'm just speculating,\n> but now is a good time to think about it.\n\nIndeed, extraUpdatedCols doesn't really seem to belong in\nRelPermissionInfo, so I have left it in RangeTblEntry.\n\nAttached updated patches.\n--\nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 7 Sep 2022 18:23:06 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hi,\n\nOn 2022-09-07 18:23:06 +0900, Amit Langote wrote:\n> Attached updated patches.\n\nThanks to Justin's recent patch (89d16b63527) to add\n-DRELCACHE_FORCE_RELEASE -DCOPY_PARSE_PLAN_TREES -DWRITE_READ_PARSE_PLAN_TREES -DRAW_EXPRESSION_COVERAGE_TEST\nto the FreeBSD ci task we now see the following:\n\nhttps://cirrus-ci.com/task/4772259058417664\nhttps://api.cirrus-ci.com/v1/artifact/task/4772259058417664/testrun/build/testrun/main/regress/regression.diffs\n\ndiff -U3 
/tmp/cirrus-ci-build/src/test/regress/expected/updatable_views.out /tmp/cirrus-ci-build/build/testrun/main/regress/results/updatable_views.out\n--- /tmp/cirrus-ci-build/src/test/regress/expected/updatable_views.out\t2022-10-02 10:37:08.888945000 +0000\n+++ /tmp/cirrus-ci-build/build/testrun/main/regress/results/updatable_views.out\t2022-10-02 10:40:26.947887000 +0000\n@@ -1727,14 +1727,16 @@\n (4 rows)\n\n UPDATE base_tbl SET id = 2000 WHERE id = 2;\n+WARNING: outfuncs/readfuncs failed to produce an equal rewritten parse tree\n UPDATE rw_view1 SET id = 3000 WHERE id = 3;\n+WARNING: outfuncs/readfuncs failed to produce an equal rewritten parse tree\n SELECT * FROM base_tbl;\n id | idplus1\n ------+---------\n 1 | 2\n 4 | 5\n- 2000 | 2001\n- 3000 | 3001\n+ 2000 | 3\n+ 3000 | 4\n (4 rows)\n\n DROP TABLE base_tbl CASCADE;\n\nand many more.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Sun, 2 Oct 2022 10:10:36 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hi,\n\nOn Mon, Oct 3, 2022 at 2:10 AM Andres Freund <andres@anarazel.de> wrote:\n> On 2022-09-07 18:23:06 +0900, Amit Langote wrote:\n> > Attached updated patches.\n>\n> Thanks to Justin's recent patch (89d16b63527) to add\n> -DRELCACHE_FORCE_RELEASE -DCOPY_PARSE_PLAN_TREES -DWRITE_READ_PARSE_PLAN_TREES -DRAW_EXPRESSION_COVERAGE_TEST\n> to the FreeBSD ci task we now see the following:\n>\n> https://cirrus-ci.com/task/4772259058417664\n> https://api.cirrus-ci.com/v1/artifact/task/4772259058417664/testrun/build/testrun/main/regress/regression.diffs\n>\n> diff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/updatable_views.out /tmp/cirrus-ci-build/build/testrun/main/regress/results/updatable_views.out\n> --- /tmp/cirrus-ci-build/src/test/regress/expected/updatable_views.out 2022-10-02 10:37:08.888945000 +0000\n> +++ /tmp/cirrus-ci-build/build/testrun/main/regress/results/updatable_views.out 
2022-10-02 10:40:26.947887000 +0000\n> @@ -1727,14 +1727,16 @@\n> (4 rows)\n>\n> UPDATE base_tbl SET id = 2000 WHERE id = 2;\n> +WARNING: outfuncs/readfuncs failed to produce an equal rewritten parse tree\n> UPDATE rw_view1 SET id = 3000 WHERE id = 3;\n> +WARNING: outfuncs/readfuncs failed to produce an equal rewritten parse tree\n> SELECT * FROM base_tbl;\n> id | idplus1\n> ------+---------\n> 1 | 2\n> 4 | 5\n> - 2000 | 2001\n> - 3000 | 3001\n> + 2000 | 3\n> + 3000 | 4\n> (4 rows)\n>\n> DROP TABLE base_tbl CASCADE;\n>\n> and many more.\n\nThanks for the heads up. Grateful for those new -D flags.\n\nTurns out I had forgotten to update out/readRangeTblEntry() after\nbringing extraUpdatedCols back into RangeTblEntry per Tom's comment.\n\nFixed in the attached.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 3 Oct 2022 18:10:13 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Thu, Jul 28, 2022 at 6:18 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> ... One more thing: maybe we should rethink where to put\n> extraUpdatedCols. Between the facts that it's not used for\n> actual permissions checks, and that it's calculated by the\n> rewriter not parser, it doesn't seem like it really belongs\n> in RelPermissionInfo. Should we keep it in RangeTblEntry?\n> Should it go somewhere else entirely? I'm just speculating,\n> but now is a good time to think about it.\n\nAfter fixing the issue related to this mentioned by Andres, I started\nthinking more about this, especially the \"Should it go somewhere else\nentirely?\" part.\n\nI've kept extraUpdatedCols in RangeTblEntry in the latest patch, but\nperhaps it makes sense to put that into Query? 
So, the stanza in\nRewriteQuery() that sets extraUpdatedCols will be changed as:\n\n@@ -3769,7 +3769,8 @@ RewriteQuery(Query *parsetree, List *rewrite_events)\n NULL, 0, NULL);\n\n /* Also populate extraUpdatedCols (for generated columns) */\n- fill_extraUpdatedCols(rt_entry, rt_perminfo, rt_entry_relation);\n+ parsetree->extraUpdatedCols =\n+ get_extraUpdatedCols(rt_entry, rt_perminfo, rt_entry_relation);\n }\n else if (event == CMD_MERGE)\n {\n\nThen, like withCheckOptionLists et al, have grouping_planner()\npopulate an extraUpdatedColsBitmaps in ModifyTable.\nExecInitModifyTable() will assign them directly into ResultRelInfos.\nThat way, anyplace in the executor that needs to look at\nextraUpdatedCols of a given result relation can get that from its\nResultRelInfo, rather than from the RangeTblEntry as now.\n\nThoughts?\n\n\n--\nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 4 Oct 2022 12:45:30 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Amit Langote <amitlangote09@gmail.com> writes:\n> On Thu, Jul 28, 2022 at 6:18 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> ... One more thing: maybe we should rethink where to put\n>> extraUpdatedCols. Between the facts that it's not used for\n>> actual permissions checks, and that it's calculated by the\n>> rewriter not parser, it doesn't seem like it really belongs\n>> in RelPermissionInfo. Should we keep it in RangeTblEntry?\n>> Should it go somewhere else entirely? I'm just speculating,\n>> but now is a good time to think about it.\n\n> I've kept extraUpdatedCols in RangeTblEntry in the latest patch, but\n> perhaps it makes sense to put that into Query?\n\nThat's got morally the same problem as keeping it in RangeTblEntry:\nthose are structures that are built by the parser. 
Hacking on them\nlater isn't terribly clean.\n\nI wonder if it could make sense to postpone calculation of the\nextraUpdatedCols out of the rewriter and into the planner, with\nthe idea that it ends up $someplace in the finished plan tree\nbut isn't part of the original parsetree.\n\nA different aspect of this is that putting it in Query doesn't\nmake a lot of sense unless there is only one version of the\nbitmap per Query. In simple UPDATEs that would be true, but\nI think that inherited/partitioned UPDATEs would need one per\nresult relation, which is likely the reason it got dumped in\nRangeTblEntry to begin with.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 03 Oct 2022 23:54:27 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Tue, Oct 4, 2022 at 12:54 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Amit Langote <amitlangote09@gmail.com> writes:\n> > On Thu, Jul 28, 2022 at 6:18 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> ... One more thing: maybe we should rethink where to put\n> >> extraUpdatedCols. Between the facts that it's not used for\n> >> actual permissions checks, and that it's calculated by the\n> >> rewriter not parser, it doesn't seem like it really belongs\n> >> in RelPermissionInfo. Should we keep it in RangeTblEntry?\n> >> Should it go somewhere else entirely? I'm just speculating,\n> >> but now is a good time to think about it.\n>\n> > I've kept extraUpdatedCols in RangeTblEntry in the latest patch, but\n> > perhaps it makes sense to put that into Query?\n>\n> That's got morally the same problem as keeping it in RangeTblEntry:\n> those are structures that are built by the parser. 
Hacking on them\n> later isn't terribly clean.\n>\n> I wonder if it could make sense to postpone calculation of the\n> extraUpdatedCols out of the rewriter and into the planner, with\n> the idea that it ends up $someplace in the finished plan tree\n> but isn't part of the original parsetree.\n\nLooking at PlannerInfo.update_colnos, something that's needed for\nexecution but not in Query, maybe we can make preprocess_targetlist()\nalso populate an PlannerInfo.extraUpdatedCols?\n\n> A different aspect of this is that putting it in Query doesn't\n> make a lot of sense unless there is only one version of the\n> bitmap per Query. In simple UPDATEs that would be true, but\n> I think that inherited/partitioned UPDATEs would need one per\n> result relation, which is likely the reason it got dumped in\n> RangeTblEntry to begin with.\n\nYeah, so if we have PlannerInfos.extraUpdatedCols as the root table's\nversion of that, grouping_planner() can make copies for all result\nrelations and put the list in ModifyTable.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 4 Oct 2022 13:11:08 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Tue, Oct 4, 2022 at 1:11 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Tue, Oct 4, 2022 at 12:54 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > Amit Langote <amitlangote09@gmail.com> writes:\n> > > On Thu, Jul 28, 2022 at 6:18 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > >> ... One more thing: maybe we should rethink where to put\n> > >> extraUpdatedCols. Between the facts that it's not used for\n> > >> actual permissions checks, and that it's calculated by the\n> > >> rewriter not parser, it doesn't seem like it really belongs\n> > >> in RelPermissionInfo. Should we keep it in RangeTblEntry?\n> > >> Should it go somewhere else entirely? 
I'm just speculating,\n> > >> but now is a good time to think about it.\n> >\n> > > I've kept extraUpdatedCols in RangeTblEntry in the latest patch, but\n> > > perhaps it makes sense to put that into Query?\n> >\n> > That's got morally the same problem as keeping it in RangeTblEntry:\n> > those are structures that are built by the parser. Hacking on them\n> > later isn't terribly clean.\n> >\n> > I wonder if it could make sense to postpone calculation of the\n> > extraUpdatedCols out of the rewriter and into the planner, with\n> > the idea that it ends up $someplace in the finished plan tree\n> > but isn't part of the original parsetree.\n>\n> Looking at PlannerInfo.update_colnos, something that's needed for\n> execution but not in Query, maybe we can make preprocess_targetlist()\n> also populate an PlannerInfo.extraUpdatedCols?\n>\n> > A different aspect of this is that putting it in Query doesn't\n> > make a lot of sense unless there is only one version of the\n> > bitmap per Query. In simple UPDATEs that would be true, but\n> > I think that inherited/partitioned UPDATEs would need one per\n> > result relation, which is likely the reason it got dumped in\n> > RangeTblEntry to begin with.\n>\n> Yeah, so if we have PlannerInfos.extraUpdatedCols as the root table's\n> version of that, grouping_planner() can make copies for all result\n> relations and put the list in ModifyTable.\n\nI tried in the attached 0004. ModifyTable gets a new member\nextraUpdatedColsBitmaps, which is List of Bitmapset \"nodes\".\n\nActually, List of Bitmapsets turned out to be something that doesn't\njust-work with our Node infrastructure, which I found out thanks to\n-DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\nfirst-class support for copy/equal/write/read support for Bitmapsets,\nsuch that writeNode() can write appropriately labeled versions of them\nand nodeRead() can read them as Bitmapsets. That's done in 0003. 
I\ndidn't actually go ahead and make *all* Bitmapsets in the plan trees\nto be Nodes, but maybe 0003 can be expanded to do that. We won't need\nto make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\njust use *_NODE_FIELD().\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Thu, 6 Oct 2022 22:29:46 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Thu, Oct 6, 2022 at 10:29 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> Actually, List of Bitmapsets turned out to be something that doesn't\n> just-work with our Node infrastructure, which I found out thanks to\n> -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> first-class support for copy/equal/write/read support for Bitmapsets,\n> such that writeNode() can write appropriately labeled versions of them\n> and nodeRead() can read them as Bitmapsets. That's done in 0003. I\n> didn't actually go ahead and make *all* Bitmapsets in the plan trees\n> to be Nodes, but maybe 0003 can be expanded to do that. We won't need\n> to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> just use *_NODE_FIELD().\n\nAll meson builds on the cfbot machines seem to have failed, maybe\nbecause I didn't update src/include/nodes/meson.build to add\n'nodes/bitmapset.h' to the `node_support_input_i` collection. Here's\nan updated version assuming that's the problem. 
(Will set up meson\nbuilds on my machine to avoid this in the future.)\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 7 Oct 2022 10:04:26 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Oct 7, 2022 at 10:04 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Thu, Oct 6, 2022 at 10:29 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > Actually, List of Bitmapsets turned out to be something that doesn't\n> > just-work with our Node infrastructure, which I found out thanks to\n> > -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> > first-class support for copy/equal/write/read support for Bitmapsets,\n> > such that writeNode() can write appropriately labeled versions of them\n> > and nodeRead() can read them as Bitmapsets. That's done in 0003. I\n> > didn't actually go ahead and make *all* Bitmapsets in the plan trees\n> > to be Nodes, but maybe 0003 can be expanded to do that. We won't need\n> > to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> > just use *_NODE_FIELD().\n>\n> All meson builds on the cfbot machines seem to have failed, maybe\n> because I didn't update src/include/nodes/meson.build to add\n> 'nodes/bitmapset.h' to the `node_support_input_i` collection. Here's\n> an updated version assuming that's the problem. (Will set up meson\n> builds on my machine to avoid this in the future.)\n\nAnd... 
noticed that a postgres_fdw test failed, because\n_readBitmapset() not having been changed to set NodeTag would\n\"corrupt\" any Bitmapsets that were created with it set.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 7 Oct 2022 13:25:42 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Oct 7, 2022 at 1:25 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Fri, Oct 7, 2022 at 10:04 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> > On Thu, Oct 6, 2022 at 10:29 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > Actually, List of Bitmapsets turned out to be something that doesn't\n> > > just-work with our Node infrastructure, which I found out thanks to\n> > > -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> > > first-class support for copy/equal/write/read support for Bitmapsets,\n> > > such that writeNode() can write appropriately labeled versions of them\n> > > and nodeRead() can read them as Bitmapsets. That's done in 0003. I\n> > > didn't actually go ahead and make *all* Bitmapsets in the plan trees\n> > > to be Nodes, but maybe 0003 can be expanded to do that. We won't need\n> > > to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> > > just use *_NODE_FIELD().\n> >\n> > All meson builds on the cfbot machines seem to have failed, maybe\n> > because I didn't update src/include/nodes/meson.build to add\n> > 'nodes/bitmapset.h' to the `node_support_input_i` collection. Here's\n> > an updated version assuming that's the problem. (Will set up meson\n> > builds on my machine to avoid this in the future.)\n>\n> And... noticed that a postgres_fdw test failed, because\n> _readBitmapset() not having been changed to set NodeTag would\n> \"corrupt\" any Bitmapsets that were created with it set.\n\nBroke the other cases while fixing the above. 
Attaching a new version\nagain. In the latest version, I'm setting Bitmapset.type by hand with\nan XXX comment nearby saying that it would be nice to change that to\nmakeNode(Bitmapset), which I know sounds pretty ad-hoc.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 7 Oct 2022 15:49:56 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Oct 7, 2022 at 3:49 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Fri, Oct 7, 2022 at 1:25 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > On Fri, Oct 7, 2022 at 10:04 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > On Thu, Oct 6, 2022 at 10:29 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > Actually, List of Bitmapsets turned out to be something that doesn't\n> > > > just-work with our Node infrastructure, which I found out thanks to\n> > > > -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> > > > first-class support for copy/equal/write/read support for Bitmapsets,\n> > > > such that writeNode() can write appropriately labeled versions of them\n> > > > and nodeRead() can read them as Bitmapsets. That's done in 0003. I\n> > > > didn't actually go ahead and make *all* Bitmapsets in the plan trees\n> > > > to be Nodes, but maybe 0003 can be expanded to do that. We won't need\n> > > > to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> > > > just use *_NODE_FIELD().\n> > >\n> > > All meson builds on the cfbot machines seem to have failed, maybe\n> > > because I didn't update src/include/nodes/meson.build to add\n> > > 'nodes/bitmapset.h' to the `node_support_input_i` collection. Here's\n> > > an updated version assuming that's the problem. (Will set up meson\n> > > builds on my machine to avoid this in the future.)\n> >\n> > And... 
noticed that a postgres_fdw test failed, because\n> > _readBitmapset() not having been changed to set NodeTag would\n> > \"corrupt\" any Bitmapsets that were created with it set.\n>\n> Broke the other cases while fixing the above. Attaching a new version\n> again. In the latest version, I'm setting Bitmapset.type by hand with\n> an XXX comment nearby saying that it would be nice to change that to\n> makeNode(Bitmapset), which I know sounds pretty ad-hoc.\n\nSorry, I attached the wrong patches with the last email. The\n\"correct\" v22 attached this time.\n\nWondering if it might be a good idea to reorder the patches such that\nthe changes that move extraUpdatedCols out of RangeTblEntry (patches\n0003 and 0004) can be considered/applied independently of the changes\nthat move permission-checking-related fields out of RangeTblEntry\n(patch 0001). The former seem more straightforward except of course\nthe Bitmapset node infrastructure changes.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 7 Oct 2022 16:31:16 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On 06.10.22 15:29, Amit Langote wrote:\n> I tried in the attached 0004. ModifyTable gets a new member\n> extraUpdatedColsBitmaps, which is List of Bitmapset \"nodes\".\n> \n> Actually, List of Bitmapsets turned out to be something that doesn't\n> just-work with our Node infrastructure, which I found out thanks to\n> -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> first-class support for copy/equal/write/read support for Bitmapsets,\n> such that writeNode() can write appropriately labeled versions of them\n> and nodeRead() can read them as Bitmapsets. That's done in 0003. I\n> didn't actually go ahead and make*all* Bitmapsets in the plan trees\n> to be Nodes, but maybe 0003 can be expanded to do that. 
We won't need\n> to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> just use *_NODE_FIELD().\n\nSeeing that on 64-bit platforms we have a 4-byte padding gap in the \nBitmapset struct, sticking a node tag in there seems pretty sensible. \nSo turning Bitmapset into a proper Node and then making the other \nadjustments you describe makes sense to me.\n\nMaking a new thread about this might be best.\n\n(I can't currently comment on the rest of the patch set. So I don't \nknow if you'll really end up needing lists of bitmapsets. But from here \nit looks like turning bitmapsets into nodes might be a worthwhile change \njust by itself.)\n\n\n\n", "msg_date": "Wed, 12 Oct 2022 15:49:56 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Oct 12, 2022 at 10:50 PM Peter Eisentraut\n<peter.eisentraut@enterprisedb.com> wrote:\n> On 06.10.22 15:29, Amit Langote wrote:\n> > I tried in the attached 0004. ModifyTable gets a new member\n> > extraUpdatedColsBitmaps, which is List of Bitmapset \"nodes\".\n> >\n> > Actually, List of Bitmapsets turned out to be something that doesn't\n> > just-work with our Node infrastructure, which I found out thanks to\n> > -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> > first-class support for copy/equal/write/read support for Bitmapsets,\n> > such that writeNode() can write appropriately labeled versions of them\n> > and nodeRead() can read them as Bitmapsets. That's done in 0003. I\n> > didn't actually go ahead and make*all* Bitmapsets in the plan trees\n> > to be Nodes, but maybe 0003 can be expanded to do that. 
We won't need\n> > to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> > just use *_NODE_FIELD().\n>\n> Seeing that on 64-bit platforms we have a 4-byte padding gap in the\n> Bitmapset struct, sticking a node tag in there seems pretty sensible.\n> So turning Bitmapset into a proper Node and then making the other\n> adjustments you describe makes sense to me.\n>\n> Making a new thread about this might be best.\n>\n> (I can't currently comment on the rest of the patch set. So I don't\n> know if you'll really end up needing lists of bitmapsets. But from here\n> it looks like turning bitmapsets into nodes might be a worthwhile change\n> just by itself.)\n\nOk, thanks. I'll start a new thread about it.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Thu, 13 Oct 2022 17:14:30 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Oct 7, 2022 at 4:31 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Fri, Oct 7, 2022 at 3:49 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > On Fri, Oct 7, 2022 at 1:25 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > On Fri, Oct 7, 2022 at 10:04 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > On Thu, Oct 6, 2022 at 10:29 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > > Actually, List of Bitmapsets turned out to be something that doesn't\n> > > > > just-work with our Node infrastructure, which I found out thanks to\n> > > > > -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> > > > > first-class support for copy/equal/write/read support for Bitmapsets,\n> > > > > such that writeNode() can write appropriately labeled versions of them\n> > > > > and nodeRead() can read them as Bitmapsets. That's done in 0003. 
I\n> > > > > didn't actually go ahead and make *all* Bitmapsets in the plan trees\n> > > > > to be Nodes, but maybe 0003 can be expanded to do that. We won't need\n> > > > > to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> > > > > just use *_NODE_FIELD().\n> > > >\n> > > > All meson builds on the cfbot machines seem to have failed, maybe\n> > > > because I didn't update src/include/nodes/meson.build to add\n> > > > 'nodes/bitmapset.h' to the `node_support_input_i` collection. Here's\n> > > > an updated version assuming that's the problem. (Will set up meson\n> > > > builds on my machine to avoid this in the future.)\n> > >\n> > > And... noticed that a postgres_fdw test failed, because\n> > > _readBitmapset() not having been changed to set NodeTag would\n> > > \"corrupt\" any Bitmapsets that were created with it set.\n> >\n> > Broke the other cases while fixing the above. Attaching a new version\n> > again. In the latest version, I'm setting Bitmapset.type by hand with\n> > an XXX comment nearby saying that it would be nice to change that to\n> > makeNode(Bitmapset), which I know sounds pretty ad-hoc.\n>\n> Sorry, I attached the wrong patches with the last email. 
The\n> \"correct\" v22 attached this time.\n\nRebased over c037471832.\n\n\n--\nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Sat, 15 Oct 2022 15:00:58 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "2022年10月15日(土) 15:01 Amit Langote <amitlangote09@gmail.com>:\n>\n> On Fri, Oct 7, 2022 at 4:31 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > On Fri, Oct 7, 2022 at 3:49 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > On Fri, Oct 7, 2022 at 1:25 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > On Fri, Oct 7, 2022 at 10:04 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > > On Thu, Oct 6, 2022 at 10:29 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > > > Actually, List of Bitmapsets turned out to be something that doesn't\n> > > > > > just-work with our Node infrastructure, which I found out thanks to\n> > > > > > -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> > > > > > first-class support for copy/equal/write/read support for Bitmapsets,\n> > > > > > such that writeNode() can write appropriately labeled versions of them\n> > > > > > and nodeRead() can read them as Bitmapsets. That's done in 0003. I\n> > > > > > didn't actually go ahead and make *all* Bitmapsets in the plan trees\n> > > > > > to be Nodes, but maybe 0003 can be expanded to do that. We won't need\n> > > > > > to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> > > > > > just use *_NODE_FIELD().\n> > > > >\n> > > > > All meson builds on the cfbot machines seem to have failed, maybe\n> > > > > because I didn't update src/include/nodes/meson.build to add\n> > > > > 'nodes/bitmapset.h' to the `node_support_input_i` collection. Here's\n> > > > > an updated version assuming that's the problem. 
(Will set up meson\n> > > > > builds on my machine to avoid this in the future.)\n> > > >\n> > > > And... noticed that a postgres_fdw test failed, because\n> > > > _readBitmapset() not having been changed to set NodeTag would\n> > > > \"corrupt\" any Bitmapsets that were created with it set.\n> > >\n> > > Broke the other cases while fixing the above. Attaching a new version\n> > > again. In the latest version, I'm setting Bitmapset.type by hand with\n> > > an XXX comment nearby saying that it would be nice to change that to\n> > > makeNode(Bitmapset), which I know sounds pretty ad-hoc.\n> >\n> > Sorry, I attached the wrong patches with the last email. The\n> > \"correct\" v22 attached this time.\n>\n> Rebased over c037471832.\n\nThis entry was marked as \"Needs review\" in the CommitFest app but cfbot\nreports the patch no longer applies.\n\nWe've marked it as \"Waiting on Author\". As CommitFest 2022-11 is\ncurrently underway, this would be an excellent time update the patch.\n\nOnce you think the patchset is ready for review again, you (or any\ninterested party) can move the patch entry forward by visiting\n\n https://commitfest.postgresql.org/40/3224/\n\nand changing the status to \"Needs review\".\n\n\nThanks\n\nIan Barwick\n\n\n", "msg_date": "Fri, 4 Nov 2022 08:46:02 +0900", "msg_from": "Ian Lawrence Barwick <barwick@gmail.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Nov 4, 2022 at 8:46 AM Ian Lawrence Barwick <barwick@gmail.com> wrote:\n> 2022年10月15日(土) 15:01 Amit Langote <amitlangote09@gmail.com>:\n> > On Fri, Oct 7, 2022 at 4:31 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > On Fri, Oct 7, 2022 at 3:49 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > On Fri, Oct 7, 2022 at 1:25 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > > On Fri, Oct 7, 2022 at 10:04 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > > > On Thu, Oct 6, 2022 at 10:29 PM 
Amit Langote <amitlangote09@gmail.com> wrote:\n> > > > > > > Actually, List of Bitmapsets turned out to be something that doesn't\n> > > > > > > just-work with our Node infrastructure, which I found out thanks to\n> > > > > > > -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> > > > > > > first-class support for copy/equal/write/read support for Bitmapsets,\n> > > > > > > such that writeNode() can write appropriately labeled versions of them\n> > > > > > > and nodeRead() can read them as Bitmapsets. That's done in 0003. I\n> > > > > > > didn't actually go ahead and make *all* Bitmapsets in the plan trees\n> > > > > > > to be Nodes, but maybe 0003 can be expanded to do that. We won't need\n> > > > > > > to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> > > > > > > just use *_NODE_FIELD().\n> > > > > >\n> > > > > > All meson builds on the cfbot machines seem to have failed, maybe\n> > > > > > because I didn't update src/include/nodes/meson.build to add\n> > > > > > 'nodes/bitmapset.h' to the `node_support_input_i` collection. Here's\n> > > > > > an updated version assuming that's the problem. (Will set up meson\n> > > > > > builds on my machine to avoid this in the future.)\n> > > > >\n> > > > > And... noticed that a postgres_fdw test failed, because\n> > > > > _readBitmapset() not having been changed to set NodeTag would\n> > > > > \"corrupt\" any Bitmapsets that were created with it set.\n> > > >\n> > > > Broke the other cases while fixing the above. Attaching a new version\n> > > > again. In the latest version, I'm setting Bitmapset.type by hand with\n> > > > an XXX comment nearby saying that it would be nice to change that to\n> > > > makeNode(Bitmapset), which I know sounds pretty ad-hoc.\n> > >\n> > > Sorry, I attached the wrong patches with the last email. 
The\n> > > \"correct\" v22 attached this time.\n> >\n> > Rebased over c037471832.\n>\n> This entry was marked as \"Needs review\" in the CommitFest app but cfbot\n> reports the patch no longer applies.\n>\n> We've marked it as \"Waiting on Author\". As CommitFest 2022-11 is\n> currently underway, this would be an excellent time update the patch.\n\nThanks for the heads up.\n\n> Once you think the patchset is ready for review again, you (or any\n> interested party) can move the patch entry forward by visiting\n>\n> https://commitfest.postgresql.org/40/3224/\n>\n> and changing the status to \"Needs review\".\n\nRebased patch attached and done.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 7 Nov 2022 16:03:45 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hello\n\nI've been trying to understand what 0001 does. One thing that strikes\nme is that it seems like it'd be easy to have bugs because of modifying\nthe perminfo list inadequately. I couldn't find any cross-check that\nsome perminfo element that we obtain for a rte does actually match the\nrelation we wanted to check. Maybe we could add a test in some central\nplace that perminfo->relid equals rte->relid?\n\nA related point is that concatenating lists doesn't seem to worry about\nnot processing one element multiple times and ending up with bogus offsets.\n(I suppose you still have to let an element be processed multiple times\nin case you have nested subqueries? I wonder how good is the test\ncoverage for such scenarios.)\n\nWhy do callers of add_rte_to_flat_rtable() have to modify the rte's\nperminfoindex themselves, instead of having the function do it for them?\nThat looks strange. 
But also it's odd that flatten_unplanned_rtes\nconcatenates the two lists after all the perminfoindexes have been\nmodified, rather than doing both things (adding each RTEs perminfo to\nthe global list and offsetting the index) as we walk the list, in\nflatten_rtes_walker. It looks like these two actions are disconnected\nfrom one another, but unless I misunderstand, in reality the opposite is\ntrue.\n\nI think the API of ConcatRTEPermissionInfoLists is a bit weird. Why not\nhave the function return the resulting list instead, just like\nlist_append? It is more verbose, but it seems easier to grok.\n\nTwo trivial changes attached. (Maybe 0002 is not correct, if you're\nalso trying to reference finalrtepermlist; but in that case I think the\noriginal may have been misleading as well.)\n\n-- \nÁlvaro Herrera 48°01'N 7°57'E — https://www.EnterpriseDB.com/", "msg_date": "Thu, 10 Nov 2022 12:58:01 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On 2022-Oct-06, Amit Langote wrote:\n\n> Actually, List of Bitmapsets turned out to be something that doesn't\n> just-work with our Node infrastructure, which I found out thanks to\n> -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> first-class support for copy/equal/write/read support for Bitmapsets,\n> such that writeNode() can write appropriately labeled versions of them\n> and nodeRead() can read them as Bitmapsets. That's done in 0003. I\n> didn't actually go ahead and make *all* Bitmapsets in the plan trees\n> to be Nodes, but maybe 0003 can be expanded to do that. 
We won't need\n> to make gen_node_support.pl emit *_BITMAPSET_FIELD() blurbs then; can\n> just use *_NODE_FIELD().\n\nHmm, is this related to what Tom posted as part of his 0004 in\nhttps://postgr.es/m/2901865.1667685211@sss.pgh.pa.us\n\n-- \nÁlvaro Herrera Breisgau, Deutschland — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Thu, 10 Nov 2022 13:19:51 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> On 2022-Oct-06, Amit Langote wrote:\n>> Actually, List of Bitmapsets turned out to be something that doesn't\n>> just-work with our Node infrastructure, which I found out thanks to\n>> -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n>> first-class support for copy/equal/write/read support for Bitmapsets,\n\n> Hmm, is this related to what Tom posted as part of his 0004 in\n> https://postgr.es/m/2901865.1667685211@sss.pgh.pa.us\n\nIt could be. For some reason I thought that Amit had withdrawn\nhis proposal to make Bitmapsets be Nodes. But if it's still live,\nthen the data structure I invented in my 0004 could plausibly be\nreplaced by a List of Bitmapsets.\n\nThe code I was using that for would rather have fixed-size arrays\nof Bitmapsets than variable-size Lists, mainly because it always\nknows ab initio what the max length of the array will be. But\nI don't think that the preference is so strong that it justifies\na private data structure.\n\nThe main thing I was wondering about in connection with that\nwas whether to assume that there could be other future applications\nof the logic to perform multi-bitmapset union, intersection,\netc. If so, then I'd be inclined to choose different naming and\nput those functions in or near to bitmapset.c. 
It doesn't look\nlike Amit's code needs anything like that, but maybe somebody\nhas an idea about other applications?\n\nAnyway, I concur with Peter's upthread comment that making\nBitmapsets be Nodes is probably justifiable all by itself.\nThe lack of a Node tag in them now is just because in a 32-bit\nworld it seemed like unnecessary bloat. But on 64-bit machines\nit's free, and we aren't optimizing for 32-bit any more.\n\nI do not like the details of v24-0003 at all though, because\nit seems to envision that a \"node Bitmapset\" is a different\nthing from a raw Bitmapset. That can only lead to bugs ---\nwhy would we not make it the case that every Bitmapset is\nproperly labeled with the node tag?\n\nAlso, although I'm on board with making Bitmapsets be Nodes,\nI don't think I'm on board with changing their dump format.\nPlanner node dumps would get enormously bulkier and less\nreadable if we changed things like\n\n :relids (b 1 2)\n\nto\n\n :relids\n {BITMAPSET\n (b 1 2)\n }\n\nor whatever the output would look like as the patch stands.\nSo that needs a bit more effort, but it's surely manageable.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 11 Nov 2022 11:46:16 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Sat, Nov 12, 2022 at 1:46 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> > On 2022-Oct-06, Amit Langote wrote:\n> >> Actually, List of Bitmapsets turned out to be something that doesn't\n> >> just-work with our Node infrastructure, which I found out thanks to\n> >> -DWRITE_READ_PARSE_PLAN_TREES. So, I had to go ahead and add\n> >> first-class support for copy/equal/write/read support for Bitmapsets,\n>\n> > Hmm, is this related to what Tom posted as part of his 0004 in\n> > https://postgr.es/m/2901865.1667685211@sss.pgh.pa.us\n>\n> It could be. 
For some reason I thought that Amit had withdrawn\n> his proposal to make Bitmapsets be Nodes.\n\nI think you are referring to [1] that I had forgotten to link to here.\nI did reimplement a data structure in my patch on the \"Re: generic\nplans and initial pruning\" thread to stop using a List of Bitmapsets,\nso the Bitmapset as Nodes functionality became unnecessary there,\nthough I still need it for the proposal here to move\nextraUpdatedColumns (patch 0004) into ModifyTable node.\n\n> The code I was using that for would rather have fixed-size arrays\n> of Bitmapsets than variable-size Lists, mainly because it always\n> knows ab initio what the max length of the array will be. But\n> I don't think that the preference is so strong that it justifies\n> a private data structure.\n>\n> The main thing I was wondering about in connection with that\n> was whether to assume that there could be other future applications\n> of the logic to perform multi-bitmapset union, intersection,\n> etc. If so, then I'd be inclined to choose different naming and\n> put those functions in or near to bitmapset.c. It doesn't look\n> like Amit's code needs anything like that, but maybe somebody\n> has an idea about other applications?\n\nYes, simple storage of multiple Bitmapsets in a List somewhere in a\nparse/plan tree sounded like that would have wider enough use to add\nproper node support for. Assuming you mean trying to generalize\nVarAttnoSet in your patch 0004 posted at [2], I wonder if you want to\nsomehow make its indexability by varno / RT index a part of the\ninterface of the generic code you're thinking for it? For example,\nvarattnoset_*_members collection of routines in that patch seem to\nassume that the Bitmapsets at a given index in the provided pair of\nVarAttnoSets are somehow related -- covering to the same base relation\nin this case. 
That does not sound very generalizable but maybe that\nis not what you are thinking at all.\n\n> Anyway, I concur with Peter's upthread comment that making\n> Bitmapsets be Nodes is probably justifiable all by itself.\n> The lack of a Node tag in them now is just because in a 32-bit\n> world it seemed like unnecessary bloat. But on 64-bit machines\n> it's free, and we aren't optimizing for 32-bit any more.\n>\n> I do not like the details of v24-0003 at all though, because\n> it seems to envision that a \"node Bitmapset\" is a different\n> thing from a raw Bitmapset. That can only lead to bugs ---\n> why would we not make it the case that every Bitmapset is\n> properly labeled with the node tag?\n\nYeah, I just didn't think hard enough to realize that having\nbitmapset.c itself set the node tag is essentially free, and it looks\nlike a better design anyway than the design where callers get to\nchoose to make the bitmapset they are manipulating a Node or not.\n\n> Also, although I'm on board with making Bitmapsets be Nodes,\n> I don't think I'm on board with changing their dump format.\n> Planner node dumps would get enormously bulkier and less\n> readable if we changed things like\n>\n> :relids (b 1 2)\n>\n> to\n>\n> :relids\n> {BITMAPSET\n> (b 1 2)\n> }\n>\n> or whatever the output would look like as the patch stands.\n> So that needs a bit more effort, but it's surely manageable.\n\nAgreed with leaving the dump format unchanged or not bloating it.\n\nThanks a lot for 5e1f3b9ebf6e5.\n\n--\nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n[1] https://www.postgresql.org/message-id/CA+HiwqG8L3DVoZauJi1-eorLnnoM6VcfJCCauQX8=ofi-qMYCQ@mail.gmail.com\n[2] https://www.postgresql.org/message-id/2901865.1667685211%40sss.pgh.pa.us\n\n\n", "msg_date": "Mon, 14 Nov 2022 16:32:43 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On 2022-Nov-10, 
Alvaro Herrera wrote:\n\n> I couldn't find any cross-check that\n> some perminfo element that we obtain for a rte does actually match the\n> relation we wanted to check. Maybe we could add a test in some central\n> place that perminfo->relid equals rte->relid?\n\nI hadn't looked hard enough. This is already in GetRTEPermissionInfo().\n\n\n> A related point is that concatenating lists doesn't seem to worry about\n> not processing one element multiple times and ending up with bogus offsets.\n\n> I think the API of ConcatRTEPermissionInfoLists is a bit weird. Why not\n> have the function return the resulting list instead, just like\n> list_append? It is more verbose, but it seems easier to grok.\n\nAnother point related to this. I noticed that everyplace we do\nConcatRTEPermissionInfoLists, it is followed by list_append'ing the RT\nlist themselves. This is strange. Maybe that's the wrong way to look\nat this, and instead we should have a function that does both things\ntogether: pass both rtables and rtepermlists and smash them all\ntogether.\n\nI attach your 0001 again with a bunch of other fixups (I don't include\nyour 0002ff). 
I've pushed this to see the CI results, and so far it's\nlooking good (hasn't finished yet though):\nhttps://cirrus-ci.com/build/5126818977021952\n\nI'll have a look at 0002 now.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/", "msg_date": "Wed, 16 Nov 2022 12:44:02 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hi Alvaro,\n\nOn Thu, Nov 10, 2022 at 8:58 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> Hello\n>\n> I've been trying to understand what 0001 does.\n\nThanks a lot for taking a look.\n\n> A related point is that concatenating lists doesn't seem to worry about\n> not processing one element multiple times and ending up with bogus offsets.\n\nCould you please clarify what you mean by \"an element\" here? Are you\nare implying that any given relation, no matter how many times it\noccurs in a query (possibly via view rule expansion), should end up\nwith only one RTEPermissionInfo node covering all its occurrences in\nthe combined/final rtepermlist, as a result of concatenation/merging\nof rtepermlists of different Queries? Versions of the patch prior to\nv17 did it that way, but Tom didn't like that approach much [1], so I\nswitched to the current implementation where the merging of two\nQueries' range tables using list_concat() is accompanied by the\nmerging of their rtepermlists, again, using list_concat(). So, if the\nsame table has multiple RTEs in a query, so will there be multiple\ncorresponding RTEPermissionInfos.\n\n> (I suppose you still have to let an element be processed multiple times\n> in case you have nested subqueries? 
I wonder how good is the test\n> coverage for such scenarios.)\n\nISTM the existing tests cover a good portion of the changes being made\nhere, but I guess I'm only saying that because I have spent a\nnon-trivial amount of time debugging the test failures across many\nfiles over different versions of the patch, especially those that\ninvolve views.\n\nDo you think it might be better to add some new tests as part of this\npatch than simply relying on the existing tests not failing?\n\n> Why do callers of add_rte_to_flat_rtable() have to modify the rte's\n> perminfoindex themselves, instead of having the function do it for them?\n> That looks strange. But also it's odd that flatten_unplanned_rtes\n> concatenates the two lists after all the perminfoindexes have been\n> modified, rather than doing both things (adding each RTEs perminfo to\n> the global list and offsetting the index) as we walk the list, in\n> flatten_rtes_walker. It looks like these two actions are disconnected\n> from one another, but unless I misunderstand, in reality the opposite is\n> true.\n\nOK, I have moved the step of updating perminfoindex into\nadd_rte_to_flat_rtable(), where it looks up the RTEPermissionInfo for\nan RTE being added using GetRTEPermissionInfo() and lappend()'s it to\nfinalrtepermlist before updating the index. For flatten_rtes_walker()\nthen to rely on that facility, I needed to make some changes to its\narguments to pass the correct Query node to pick the rtepermlist from.\nOverall, setrefs.c changes now hopefully look saner than in the last\nversion.\n\nAs soon as I made that change, I noticed a bunch of ERRORs in\nregression tests due to the checks in GetRTEPermissionInfo(), though\nnone that looked like live bugs. Though I did find some others as I\nwas reworking the code to fix those errors, which I have fixed too.\n\n> I think the API of ConcatRTEPermissionInfoLists is a bit weird. Why not\n> have the function return the resulting list instead, just like\n> list_append? 
It is more verbose, but it seems easier to grok.\n\nAgreed, I have merged your delta patch into 0001.\n\nOn Wed, Nov 16, 2022 at 8:44 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > A related point is that concatenating lists doesn't seem to worry about\n> > not processing one element multiple times and ending up with bogus offsets.\n>\n> > I think the API of ConcatRTEPermissionInfoLists is a bit weird. Why not\n> > have the function return the resulting list instead, just like\n> > list_append? It is more verbose, but it seems easier to grok.\n>\n> Another point related to this. I noticed that everyplace we do\n> ConcatRTEPermissionInfoLists, it is followed by list_append'ing the RT\n> list themselves. This is strange. Maybe that's the wrong way to look\n> at this, and instead we should have a function that does both things\n> together: pass both rtables and rtepermlists and smash them all\n> together.\n\nOK, how does the attached 0002 look in that regard? In it, I have\nrenamed ConcatRTEPermissionInfoLists() to CombineRangeTables() which\ndoes all that. Though, given the needs of rewriteRuleAction(), the\nAPI of it may look a bit weird. (Only posting it separately for the\nease of comparison.)\n\n> I attach your 0001 again with a bunch of other fixups (I don't include\n> your 0002ff). I've pushed this to see the CI results, and so far it's\n> looking good (hasn't finished yet though):\n> https://cirrus-ci.com/build/5126818977021952\n\nI have merged all.\n\nWhile working on these changes, I realized that 0002 (the patch to\nremove OLD/NEW RTEs from the stored view query's range table) was\ngoing a bit too far by removing UpdateRangeTableOfViewParse()\naltogether. You may have noticed that a RTE_RELATION entry for the\nview relation is needed anyway for permission checking, locking, etc.\nand the patch was making the rewriter add one explicitly, whereas the\nOLD RTE would be playing that role previously. 
In the updated\nversion, I have decided to keep UpdateRangeTableOfViewParse() while\nremoving the code in it that adds the NEW RTE, which is totally\nunnecessary. Also removed the rewriter changes that were needed\npreviously. Most of the previous version of the patch was a whole\nbunch of regression test output changes, because the stored view\nquery's range table would be changed such that deparsed version of\nthose queries need no longer qualify output columns (only 1 entry in\nthe range table in those cases), though I didn't necessarily think\nthat that looked better. In the new version, because the stored view\nquery contains the OLD entry, those qualifications stay, and so none\nof the regression test changes are necessary anymore. postgres_fdw\nones are unrelated and noted in the commit message.\n\n[1] https://www.postgresql.org/message-id/3094251.1658955855%40sss.pgh.pa.us\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 21 Nov 2022 21:03:43 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Mon, Nov 21, 2022 at 9:03 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Thu, Nov 10, 2022 at 8:58 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > Why do callers of add_rte_to_flat_rtable() have to modify the rte's\n> > perminfoindex themselves, instead of having the function do it for them?\n> > That looks strange. But also it's odd that flatten_unplanned_rtes\n> > concatenates the two lists after all the perminfoindexes have been\n> > modified, rather than doing both things (adding each RTEs perminfo to\n> > the global list and offsetting the index) as we walk the list, in\n> > flatten_rtes_walker. 
It looks like these two actions are disconnected\n> > from one another, but unless I misunderstand, in reality the opposite is\n> > true.\n>\n> OK, I have moved the step of updating perminfoindex into\n> add_rte_to_flat_rtable(), where it looks up the RTEPermissionInfo for\n> an RTE being added using GetRTEPermissionInfo() and lappend()'s it to\n> finalrtepermlist before updating the index. For flatten_rtes_walker()\n> then to rely on that facility, I needed to make some changes to its\n> arguments to pass the correct Query node to pick the rtepermlist from.\n> Overall, setrefs.c changes now hopefully look saner than in the last\n> version.\n>\n> As soon as I made that change, I noticed a bunch of ERRORs in\n> regression tests due to the checks in GetRTEPermissionInfo(), though\n> none that looked like live bugs.\n\nI figured the no-live-bugs part warrants some clarification. The\nplan-time errors that I saw were caused in many cases by an RTE not\npointing into the correct list or having incorrect perminfoindex, most\nor all of those cases involving views. Passing a wrong perminfoindex\nto the executor, though obviously not good and fixed in the latest\nversion, wasn't a problem in those cases, because none of those tests\nwould cause the executor to use the perminfoindex, such as by calling\nGetRTEPermissionInfo(). 
I thought about that being problematic in\nterms of our coverage of perminfoindex related code in the executor,\nbut don't really see how we could improve that situation as far as\nviews are concerned, because the executor is only concerned about\nchecking permissions for views and perminfoindex is irrelevant in that\npath, because the RTEPermissionInfos are accessed directly from\nes_rtepermlist.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 21 Nov 2022 21:46:03 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Mon, Nov 21, 2022 at 9:03 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Wed, Nov 16, 2022 at 8:44 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > > A related point is that concatenating lists doesn't seem to worry about\n> > > not processing one element multiple times and ending up with bogus offsets.\n> >\n> > > I think the API of ConcatRTEPermissionInfoLists is a bit weird. Why not\n> > > have the function return the resulting list instead, just like\n> > > list_append? It is more verbose, but it seems easier to grok.\n> >\n> > Another point related to this. I noticed that everyplace we do\n> > ConcatRTEPermissionInfoLists, it is followed by list_append'ing the RT\n> > list themselves. This is strange. Maybe that's the wrong way to look\n> > at this, and instead we should have a function that does both things\n> > together: pass both rtables and rtepermlists and smash them all\n> > together.\n>\n> OK, how does the attached 0002 look in that regard? In it, I have\n> renamed ConcatRTEPermissionInfoLists() to CombineRangeTables() which\n> does all that. Though, given the needs of rewriteRuleAction(), the\n> API of it may look a bit weird. 
(Only posting it separately for the\n> ease of comparison.)\n\nHere's a revised version in which I've revised the code near the call\nsite of CombineRangeTables() in rewriteRuleAction() such that the\nweirdness of that API in the last version becomes unnecessary. When\ndoing those changes, I realized that we perhaps need some new tests to\nexercise rewriteRuleAction(), especially to test the order of checking\npermissions present in the (combined) range table of rewritten action\nquery, though I have not added them yet.\n\nI've included a new patch (0002) that I've also posted at [1] for this\npatch set to compile/work.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n[1] https://www.postgresql.org/message-id/CA%2BHiwqHbv4xQd-yHx0LWA04AybA%2BGQPy66UJxt8m32gB6zCYQQ%40mail.gmail.com", "msg_date": "Fri, 25 Nov 2022 20:28:37 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Thanks for the new version, in particular thank you for fixing the\nannoyance with the CombineRangeTables API.\n\n0002 was already pushed upstream, so we can forget about it. I also\npushed the addition of missing_ok to build_attrmap_by_name{,_if_req}.\nSo this series needed a refresh, which is attached here, and tests are\nrunning: https://cirrus-ci.com/build/4880219807416320\n\n\nAs for 0001+0003, here it is once again with a few fixups. There are\ntwo nontrivial changes here:\n\n1. in get_rel_all_updated_cols (née GetRelAllUpdatedCols, which I\nchanged because it didn't match the naming style in inherits.c) you were\ndoing determining the relid to use in a roundabout way, then asserting\nit is a value you already know:\n\n- use_relid = rel->top_parent_relids == NULL ? 
rel->relid :\n- bms_singleton_member(rel->top_parent_relids);\n- Assert(use_relid == root->parse->resultRelation);\n\nWhy not just use root->parse->resultRelation in the first place?\nMy 0002 does that.\n\n2. my 0005 moves a block in add_rte_to_flat_rtable one level out:\nthere's no need to put it inside the rtekind == RTE_RELATION block, and\nthe comment in that block failed to mention that we copied the\nRTEPermissionInfo; we can just let it work on the 'perminfoindex > 0'\ncondition. Also, the comment is a bit misleading, and I changed it\nsome, but maybe not sufficiently: after add_rte_to_flat_rtable, the same\nRTEPermissionInfo node will serve two RTEs: one in the Query struct,\nwhose perminfoindex corresponds to Query->rtepermlist; and the other in\nPlannerGlobal->finalrtable, whose index corresponds to\nPlannerGlobal->finalrtepermlist. I was initially thinking that the old\nRTE would result in a \"corrupted\" state, but that doesn't appear to be\nthe case. (Also: I made it grab the RTEPermissionInfo using\nrte->perminfoindex, not newrte->perminfoindex, because that seems\nslightly bogus, even if they are identical because of the memcpy.)\n\nThe other changes are cosmetic.\n\nI do not include here your 0004 and 0005. (I think we can deal with\nthose separately later.)\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/", "msg_date": "Tue, 29 Nov 2022 10:27:08 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hi Alvaro,\n\nThanks for taking a look and all the fixup patches. 
Was working on\nthat test I said we should add and then was spending some time\ncleaning things up and breaking some things out into their patches,\nmainly for the ease of review.\n\nOn Tue, Nov 29, 2022 at 6:27 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> Thanks for the new version, in particular thank you for fixing the\n> annoyance with the CombineRangeTables API.\n\nOK, now that you seem to think that looks good, I've merged it into\nthe main patch.\n\n> 0002 was already pushed upstream, so we can forget about it. I also\n> pushed the addition of missing_ok to build_attrmap_by_name{,_if_req}.\n\nYeah, I thought that needed to be broken out and had done so in my\nlocal repo. Thanks for pushing that bit.\n\n> As for 0001+0003, here it is once again with a few fixups. There are\n> two nontrivial changes here:\n>\n> 1. in get_rel_all_updated_cols (née GetRelAllUpdatedCols, which I\n> changed because it didn't match the naming style in inherits.c) you were\n> doing determining the relid to use in a roundabout way, then asserting\n> it is a value you already know:\n>\n> - use_relid = rel->top_parent_relids == NULL ? rel->relid :\n> - bms_singleton_member(rel->top_parent_relids);\n> - Assert(use_relid == root->parse->resultRelation);\n\n> Why not just use root->parse->resultRelation in the first place?\n\nFacepalm, yes.\n\n> My 0002 does that.\n\nMerged.\n\n> 2. 
my 0005 moves a block in add_rte_to_flat_rtable one level out:\n> there's no need to put it inside the rtekind == RTE_RELATION block, and\n> the comment in that block failed to mention that we copied the\n> RTEPermissionInfo; we can just let it work on the 'perminfoindex > 0'\n> condition.\n\nYes, agree that's better.\n\n> Also, the comment is a bit misleading, and I changed it\n> some, but maybe not sufficiently: after add_rte_to_flat_rtable, the same\n> RTEPermissionInfo node will serve two RTEs: one in the Query struct,\n> whose perminfoindex corresponds to Query->rtepermlist; and the other in\n> PlannerGlobal->finalrtable, whose index corresponds to\n> PlannerGlobal->finalrtepermlist. I was initially thinking that the old\n> RTE would result in a \"corrupted\" state, but that doesn't appear to be\n> the case. (Also: I made it grab the RTEPermissionInfo using\n> rte->perminfoindex, not newrte->perminfoindex, because that seems\n> slightly bogus, even if they are identical because of the memcpy.)\n\nInteresting point about two different RTEs (in different lists)\npointing to the same RTEPermissionInfo, also in different lists.\nMaybe, we should have the following there so that the PlannedStmt's\ncontents don't point into the Query?\n\n newperminfo = copyObject(perminfo);\n\n> The other changes are cosmetic.\n\nThanks, I've merged all. I do wonder that it is only in PlannedStmt\nthat the list is called something that is not \"rtepermlist\", but I'm\nfine with it if you prefer that.\n\n> I do not include here your 0004 and 0005. (I think we can deal with\n> those separately later.)\n\nOK, I have not attached them with this email either.\n\nAs I mentioned above, I've broken a couple of other changes out into\ntheir own patches that I've put before the main patch. 0001 adds\nExecGetRootToChildMap(). I thought it would be better to write in the\ncommit message why the new map is necessary for the main patch. 
0002\ncontains changes that has to do with changing how we access\ncheckAsUser in some foreign table planning/execution code sites.\nThought it might be better to describe it separately too.\n\n0003 is the main patch into which I've merged both my patch that\ninvents CombineRangeTables() that I had posted separately before and\nall of your fixups. In it, you will see a new test case that I have\nadded in rules.sql to exercise the permission checking order stuff\nthat I had said I may have broken with this patch, especially the\nhunks that change rewriteRuleAction(). That test would be broken with\nv24, but not after the changes to add_rtes_to_flat_rtable() that I\nmade to address your review comment that blindly list_concat'ing\nfinalrtepermlist and Query's rtepermlist doesn't look very robust,\nwhich it indeed wasn't [1].\n\n--\nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n[1] So, rewriteRuleAction(), with previous \"wrong\" versions of the\npatch (~v26), would combine the original query's and action query's\nrtepermlists in the \"wrong\" order, that is, not in the order in which\nRTEs appear in the combined rtable. But because\nadd_rtes_to_flat_rtable() now (v26~) adds perminfos into\nfinalrtepermlist in the RTE order using lappend(), that wrongness of\nrewriteRuleAction() would be masked -- no execution-time failure of\nthe test. 
Anyway, I've also fixed rewriteRuleAction() to be \"correct\"\nin v27, so it is the least wrong version AFAIK ;).", "msg_date": "Tue, 29 Nov 2022 22:37:56 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On 2022-Nov-29, Amit Langote wrote:\n\n> Maybe, we should have the following there so that the PlannedStmt's\n> contents don't point into the Query?\n> \n> newperminfo = copyObject(perminfo);\n\nHmm, I suppose if we want a separate RTEPermissionInfo node, we should\ninstead do GetRTEPermissionInfo(rte) followed by\nAddRTEPermissionInfo(newrte) and avoid the somewhat cowboy-ish coding\nthere.\n\n-- \nÁlvaro Herrera Breisgau, Deutschland — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Tue, 29 Nov 2022 19:04:44 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Nov 30, 2022 at 3:04 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> On 2022-Nov-29, Amit Langote wrote:\n>\n> > Maybe, we should have the following there so that the PlannedStmt's\n> > contents don't point into the Query?\n> >\n> > newperminfo = copyObject(perminfo);\n>\n> Hmm, I suppose if we want a separate RTEPermissionInfo node, we should\n> instead do GetRTEPermissionInfo(rte) followed by\n> AddRTEPermissionInfo(newrte) and avoid the somewhat cowboy-ish coding\n> there.\n\nOK, something like the attached?\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 30 Nov 2022 11:56:54 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Nov 30, 2022 at 11:56 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Wed, Nov 30, 2022 at 3:04 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> 
> On 2022-Nov-29, Amit Langote wrote:\n> >\n> > > Maybe, we should have the following there so that the PlannedStmt's\n> > > contents don't point into the Query?\n> > >\n> > > newperminfo = copyObject(perminfo);\n> >\n> > Hmm, I suppose if we want a separate RTEPermissionInfo node, we should\n> > instead do GetRTEPermissionInfo(rte) followed by\n> > AddRTEPermissionInfo(newrte) and avoid the somewhat cowboy-ish coding\n> > there.\n>\n> OK, something like the attached?\n\nThinking more about the patch I sent, which has this:\n\n+ /* Get the existing one from this query's rtepermlist. */\n perminfo = GetRTEPermissionInfo(rtepermlist, newrte);\n- glob->finalrtepermlist = lappend(glob->finalrtepermlist, perminfo);\n- newrte->perminfoindex = list_length(glob->finalrtepermlist);\n+\n+ /*\n+ * Add a new one to finalrtepermlist and copy the contents of the\n+ * existing one into it. Note that AddRTEPermissionInfo() also\n+ * updates newrte->perminfoindex to point to newperminfo in\n+ * finalrtepermlist.\n+ */\n+ newrte->perminfoindex = 0; /* expected by AddRTEPermissionInfo() */\n+ newperminfo = AddRTEPermissionInfo(&glob->finalrtepermlist, newrte);\n+ memcpy(newperminfo, perminfo, sizeof(RTEPermissionInfo));\n\nNote that simple memcpy'ing would lead to the selectedCols, etc.\nbitmapsets being shared between the Query and the PlannedStmt, which\nmay be considered as not good. But maybe that's fine, because the\nsame is true for RangeTblEntry members that do have substructure such\nas the various Alias fields that are not reset? 
Code paths that like\nto keep a PlannedStmt to be decoupled from the corresponding Query,\nsuch as plancache.c, do copy the former, so shared sub-structure in\nthe default case may be fine after all.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 30 Nov 2022 15:45:53 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hello\n\nOn 2022-Nov-29, Amit Langote wrote:\n\n> Thanks for taking a look and all the fixup patches. Was working on\n> that test I said we should add and then was spending some time\n> cleaning things up and breaking some things out into their patches,\n> mainly for the ease of review.\n\nRight, excellent. Thanks for this new version. It looks pretty good to\nme now.\n\n> On Tue, Nov 29, 2022 at 6:27 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n\n> > The other changes are cosmetic.\n> \n> Thanks, I've merged all. I do wonder that it is only in PlannedStmt\n> that the list is called something that is not \"rtepermlist\", but I'm\n> fine with it if you prefer that.\n\nI was unsure about that one myself; I just changed it because that\nstruct uses camelCaseNaming, which the others do not, so it seemed fine\nin the other places but not there. As for changing \"list\" to \"infos\",\nit seems to me we tend to avoid naming a list as \"list\", so. (Maybe I\nwould change the others to be foo_rteperminfos. Unless these naming\nchoices were already bikeshedded to its present form upthread and I\nmissed it?)\n\n> As I mentioned above, I've broken a couple of other changes out into\n> their own patches that I've put before the main patch. 0001 adds\n> ExecGetRootToChildMap(). 
I thought it would be better to write in the\n> commit message why the new map is necessary for the main patch.\n\nI was thinking about this one and it seemed too closely tied to\nExecGetInsertedCols to be committed separately. Notice how there is a\ncomment that mentions that function in your 0001, but that function\nitself still uses ri_RootToPartitionMap, so before your 0003 the comment\nis bogus. And there's now quite some duplicity between\nri_RootToPartitionMap and ri_RootToChildMap, which I think it would be\nbetter to reduce. I mean, rather than add a new field it would be\nbetter to repurpose the old one:\n\n- ExecGetRootToChildMap should return TupleConversionMap *\n- every place that accesses ri_RootToPartitionMap directly should be\n using ExecGetRootToChildMap() instead\n- ExecGetRootToChildMap passes build_attrmap_by_name_if_req\n !resultRelInfo->ri_RelationDesc->rd_rel->relispartition\n as third argument to build_attrmap_by_name_if_req (rather than\n constant true), so that we keep the tuple compatibility checking we\n have there currently.\n\n\n> 0002 contains changes that has to do with changing how we access\n> checkAsUser in some foreign table planning/execution code sites.\n> Thought it might be better to describe it separately too.\n\nI'll get this one pushed soon, it seems good to me. (I'll edit to not\nuse Oid as boolean.)\n\n-- \nÁlvaro Herrera Breisgau, Deutschland — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 30 Nov 2022 09:32:15 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hello,\n\nThis didn't apply, so I rebased it on current master, excluding the one\nI already pushed. No further changes.\n\n-- \nÁlvaro Herrera Breisgau, Deutschland — https://www.EnterpriseDB.com/\n\"No me acuerdo, pero no es cierto. 
No es cierto, y si fuera cierto,\n no me acuerdo.\" (Augusto Pinochet a una corte de justicia)", "msg_date": "Thu, 1 Dec 2022 11:49:54 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hi Alvaro,\n\nOn Wed, Nov 30, 2022 at 5:32 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > On Tue, Nov 29, 2022 at 6:27 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > Thanks, I've merged all. I do wonder that it is only in PlannedStmt\n> > that the list is called something that is not \"rtepermlist\", but I'm\n> > fine with it if you prefer that.\n>\n> I was unsure about that one myself; I just changed it because that\n> struct uses camelCaseNaming, which the others do not, so it seemed fine\n> in the other places but not there. As for changing \"list\" to \"infos\",\n> it seems to me we tend to avoid naming a list as \"list\", so. (Maybe I\n> would change the others to be foo_rteperminfos. Unless these naming\n> choices were already bikeshedded to its present form upthread and I\n> missed it?)\n\nNo, I think it was I who came up with the \"..list\" naming and\nbasically just stuck with it.\n\nActually, I don't mind changing to \"...infos\", which I have done in\nthe attached updated patch.\n\n> > As I mentioned above, I've broken a couple of other changes out into\n> > their own patches that I've put before the main patch. 0001 adds\n> > ExecGetRootToChildMap(). I thought it would be better to write in the\n> > commit message why the new map is necessary for the main patch.\n>\n> I was thinking about this one and it seemed too closely tied to\n> ExecGetInsertedCols to be committed separately. Notice how there is a\n> comment that mentions that function in your 0001, but that function\n> itself still uses ri_RootToPartitionMap, so before your 0003 the comment\n> is bogus. 
And there's now quite some duplicity between\n> ri_RootToPartitionMap and ri_RootToChildMap, which I think it would be\n> better to reduce. I mean, rather than add a new field it would be\n> better to repurpose the old one:\n>\n> - ExecGetRootToChildMap should return TupleConversionMap *\n> - every place that accesses ri_RootToPartitionMap directly should be\n> using ExecGetRootToChildMap() instead\n> - ExecGetRootToChildMap passes build_attrmap_by_name_if_req\n> !resultRelInfo->ri_RelationDesc->rd_rel->relispartition\n> as third argument to build_attrmap_by_name_if_req (rather than\n> constant true), so that we keep the tuple compatibility checking we\n> have there currently.\n\nThis sounds like a better idea than adding a new AttrMap, so done this\nway in the attached 0001.\n\n> > 0002 contains changes that has to do with changing how we access\n> > checkAsUser in some foreign table planning/execution code sites.\n> > Thought it might be better to describe it separately too.\n>\n> I'll get this one pushed soon, it seems good to me. (I'll edit to not\n> use Oid as boolean.)\n\nThanks for committing that one.\n\nI've also merged into 0002 the delta patch I had posted earlier to add\na copy of RTEPermInfos into the flattened permInfos list instead of\nadding the Query's copy.\n\n--\nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 2 Dec 2022 16:41:11 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Dec 2, 2022 at 4:41 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> Hi Alvaro,\n>\n> On Wed, Nov 30, 2022 at 5:32 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > > On Tue, Nov 29, 2022 at 6:27 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > > Thanks, I've merged all. 
I do wonder that it is only in PlannedStmt\n> > > that the list is called something that is not \"rtepermlist\", but I'm\n> > > fine with it if you prefer that.\n> >\n> > I was unsure about that one myself; I just changed it because that\n> > struct uses camelCaseNaming, which the others do not, so it seemed fine\n> > in the other places but not there. As for changing \"list\" to \"infos\",\n> > it seems to me we tend to avoid naming a list as \"list\", so. (Maybe I\n> > would change the others to be foo_rteperminfos. Unless these naming\n> > choices were already bikeshedded to its present form upthread and I\n> > missed it?)\n>\n> No, I think it was I who came up with the \"..list\" naming and\n> basically just stuck with it.\n>\n> Actually, I don't mind changing to \"...infos\", which I have done in\n> the attached updated patch.\n>\n> > > As I mentioned above, I've broken a couple of other changes out into\n> > > their own patches that I've put before the main patch. 0001 adds\n> > > ExecGetRootToChildMap(). I thought it would be better to write in the\n> > > commit message why the new map is necessary for the main patch.\n> >\n> > I was thinking about this one and it seemed too closely tied to\n> > ExecGetInsertedCols to be committed separately. Notice how there is a\n> > comment that mentions that function in your 0001, but that function\n> > itself still uses ri_RootToPartitionMap, so before your 0003 the comment\n> > is bogus. And there's now quite some duplicity between\n> > ri_RootToPartitionMap and ri_RootToChildMap, which I think it would be\n> > better to reduce. 
I mean, rather than add a new field it would be\n> > better to repurpose the old one:\n> >\n> > - ExecGetRootToChildMap should return TupleConversionMap *\n> > - every place that accesses ri_RootToPartitionMap directly should be\n> > using ExecGetRootToChildMap() instead\n> > - ExecGetRootToChildMap passes build_attrmap_by_name_if_req\n> > !resultRelInfo->ri_RelationDesc->rd_rel->relispartition\n> > as third argument to build_attrmap_by_name_if_req (rather than\n> > constant true), so that we keep the tuple compatibility checking we\n> > have there currently.\n>\n> This sounds like a better idea than adding a new AttrMap, so done this\n> way in the attached 0001.\n>\n> > > 0002 contains changes that has to do with changing how we access\n> > > checkAsUser in some foreign table planning/execution code sites.\n> > > Thought it might be better to describe it separately too.\n> >\n> > I'll get this one pushed soon, it seems good to me. (I'll edit to not\n> > use Oid as boolean.)\n>\n> Thanks for committing that one.\n>\n> I've also merged into 0002 the delta patch I had posted earlier to add\n> a copy of RTEPermInfos into the flattened permInfos list instead of\n> adding the Query's copy.\n\nOops, hit send before attaching anything.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 2 Dec 2022 16:44:05 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "Hello,\n\nOn 2022-Dec-02, Amit Langote wrote:\n\n> This sounds like a better idea than adding a new AttrMap, so done this\n> way in the attached 0001.\n\nThanks for doing that! 
I have pushed it, but I renamed\nri_RootToPartitionMap to ri_RootToChildMap and moved it to another spot\nin ResultRelInfo, which allows to simplify the comments.\n\n> I've also merged into 0002 the delta patch I had posted earlier to add\n> a copy of RTEPermInfos into the flattened permInfos list instead of\n> adding the Query's copy.\n\nGreat. At this point I have no other comments, except that in both\nparse_relation.c and rewriteManip.c you've chosen to add the new\nfunctions at the bottom of each file, which is seldom a good choice.\nI think in the case of CombineRangeTables it should be the very first\nfunction in the file, before all the walker-type stuff; and for\nAdd/GetRTEPermissionInfo I would suggest that right below\naddRangeTableEntryForENR might be a decent choice (need to fix the .h\nfiles to match, of course.)\n\n-- \nÁlvaro Herrera Breisgau, Deutschland — https://www.EnterpriseDB.com/\n\"World domination is proceeding according to plan\" (Andrew Morton)\n\n\n", "msg_date": "Fri, 2 Dec 2022 10:59:54 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Dec 2, 2022 at 7:00 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> On 2022-Dec-02, Amit Langote wrote:\n> > This sounds like a better idea than adding a new AttrMap, so done this\n> > way in the attached 0001.\n>\n> Thanks for doing that! I have pushed it, but I renamed\n> ri_RootToPartitionMap to ri_RootToChildMap and moved it to another spot\n> in ResultRelInfo, which allows to simplify the comments.\n\nThanks.\n\n> > I've also merged into 0002 the delta patch I had posted earlier to add\n> > a copy of RTEPermInfos into the flattened permInfos list instead of\n> > adding the Query's copy.\n>\n> Great. 
At this point I have no other comments, except that in both\n> parse_relation.c and rewriteManip.c you've chosen to add the new\n> functions at the bottom of each file, which is seldom a good choice.\n> I think in the case of CombineRangeTables it should be the very first\n> function in the file, before all the walker-type stuff; and for\n> Add/GetRTEPermissionInfo I would suggest that right below\n> addRangeTableEntryForENR might be a decent choice (need to fix the .h\n> files to match, of course.)\n\nOkay, I've moved the functions and their .h declarations to the places\nyou suggest. While at it, I also uncapitalized Add/Get, because\nthat's how the nearby functions in the header are named.\n\nThanks again for the review. The patch looks much better than it did\n3 weeks ago.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 2 Dec 2022 20:13:28 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Fri, Dec 2, 2022 at 8:13 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Fri, Dec 2, 2022 at 7:00 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > Great. At this point I have no other comments, except that in both\n> > parse_relation.c and rewriteManip.c you've chosen to add the new\n> > functions at the bottom of each file, which is seldom a good choice.\n> > I think in the case of CombineRangeTables it should be the very first\n> > function in the file, before all the walker-type stuff; and for\n> > Add/GetRTEPermissionInfo I would suggest that right below\n> > addRangeTableEntryForENR might be a decent choice (need to fix the .h\n> > files to match, of course.)\n>\n> Okay, I've moved the functions and their .h declarations to the places\n> you suggest. While at it, I also uncapitalized Add/Get, because\n> that's how the nearby functions in the header are named.\n>\n> Thanks again for the review. 
The patch looks much better than it did\n> 3 weeks ago.\n\nRebased over 2605643a3a9d.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 5 Dec 2022 12:09:27 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "I have pushed this finally.\n\nI made two further changes:\n\n1. there was no reason to rename ExecCheckPerms_hook, since its\n signature was changing anyway. I reverted it to the original name.\n\n2. I couldn't find any reason to expose ExecGetRTEPermissionInfo, and\n given that it's a one-line function, I removed it.\n\nMaybe you had a reason to add ExecGetRTEPermissionInfo, thinking about\nexternal callers; if so please discuss it.\n\nI'll mark this commitfest entry as committed soon; please post the other\ntwo patches you had in this series in a new thread.\n\n-- \nÁlvaro Herrera Breisgau, Deutschland — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Tue, 6 Dec 2022 16:19:05 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Dec 7, 2022 at 12:19 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> I have pushed this finally.\n\nThanks a lot.\n\n> I made two further changes:\n>\n> 1. there was no reason to rename ExecCheckPerms_hook, since its\n> signature was changing anyway. I reverted it to the original name.\n\nSure, that makes sense.\n\n> 2. I couldn't find any reason to expose ExecGetRTEPermissionInfo, and\n> given that it's a one-line function, I removed it.\n>\n> Maybe you had a reason to add ExecGetRTEPermissionInfo, thinking about\n> external callers; if so please discuss it.\n\nMy thinking was that it might be better to have a macro/function that\ntakes EState, not es_rteperminfos, from the callers. Kind of like how\nthere is exec_rt_fetch(). 
Though, that is only a cosmetic\nconsideration, so I don't want to insist.\n\n> I'll mark this commitfest entry as committed soon; please post the other\n> two patches you had in this series in a new thread.\n\nWill do, thanks.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 7 Dec 2022 16:01:18 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Dec 7, 2022 at 4:01 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Wed, Dec 7, 2022 at 12:19 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > I have pushed this finally.\n>>\n> > I'll mark this commitfest entry as committed soon; please post the other\n> > two patches you had in this series in a new thread.\n>\n> Will do, thanks.\n\nWhile doing that, I noticed that I had missed updating at least one\ncomment which still says that permission checking is done off of the\nrange table. 
Attached patch fixes that.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 7 Dec 2022 17:47:13 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On 2022-Dec-06, Alvaro Herrera wrote:\n\n> I have pushed this finally.\n> \n> I made two further changes:\n\nActually, I made one further change that I forgot to mention -- I\nchanged the API of CombineRangeTables once again; the committed patch\nhas it this way:\n\n+/*\n+ * CombineRangeTables\n+ * Adds the RTEs of 'src_rtable' into 'dst_rtable'\n+ *\n+ * This also adds the RTEPermissionInfos of 'src_perminfos' (belonging to the\n+ * RTEs in 'src_rtable') into *dst_perminfos and also updates perminfoindex of\n+ * the RTEs in 'src_rtable' to now point to the perminfos' indexes in\n+ * *dst_perminfos.\n+ *\n+ * Note that this changes both 'dst_rtable' and 'dst_perminfo' destructively,\n+ * so the caller should have better passed safe-to-modify copies.\n+ */\n+void\n+CombineRangeTables(List **dst_rtable, List **dst_perminfos,\n+ List *src_rtable, List *src_perminfos)\n\nThe original one had the target rangetable first, then the source\nRT+perminfos, and the target perminfos at the end. This seemed\ninconsistent and potentially confusing. 
I also changed the argument\nnames (from using numbers to \"dst/src\" monikers) and removed the\nbehavior of returning the list: ISTM it did turn out to be a bad idea\nafter all.\n\n-- \nÁlvaro Herrera 48°01'N 7°57'E — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 7 Dec 2022 11:42:59 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Wed, Dec 7, 2022 at 7:43 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> On 2022-Dec-06, Alvaro Herrera wrote:\n> > I have pushed this finally.\n> >\n> > I made two further changes:\n>\n> Actually, I made one further change that I forgot to mention -- I\n> changed the API of CombineRangeTables once again; the committed patch\n> has it this way:\n>\n> +/*\n> + * CombineRangeTables\n> + * Adds the RTEs of 'src_rtable' into 'dst_rtable'\n> + *\n> + * This also adds the RTEPermissionInfos of 'src_perminfos' (belonging to the\n> + * RTEs in 'src_rtable') into *dst_perminfos and also updates perminfoindex of\n> + * the RTEs in 'src_rtable' to now point to the perminfos' indexes in\n> + * *dst_perminfos.\n> + *\n> + * Note that this changes both 'dst_rtable' and 'dst_perminfo' destructively,\n> + * so the caller should have better passed safe-to-modify copies.\n> + */\n> +void\n> +CombineRangeTables(List **dst_rtable, List **dst_perminfos,\n> + List *src_rtable, List *src_perminfos)\n>\n> The original one had the target rangetable first, then the source\n> RT+perminfos, and the target perminfos at the end. This seemed\n> inconsistent and potentially confusing. 
I also changed the argument\n> names (from using numbers to \"dst/src\" monikers) and removed the\n> behavior of returning the list: ISTM it did turn out to be a bad idea\n> after all.\n\nThis looks better to me too.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 7 Dec 2022 19:50:11 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On 2022-Dec-07, Amit Langote wrote:\n\n> While doing that, I noticed that I had missed updating at least one\n> comment which still says that permission checking is done off of the\n> range table. Attached patch fixes that.\n\nPushed, thanks.\n\n-- \nÁlvaro Herrera Breisgau, Deutschland — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 7 Dec 2022 12:44:15 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions" }, { "msg_contents": "On Tue, Nov 29, 2022 at 10:37:56PM +0900, Amit Langote wrote:\n> 0002 contains changes that has to do with changing how we access\n> checkAsUser in some foreign table planning/execution code sites.\n> Thought it might be better to describe it separately too.\n\nThis was committed as 599b33b94:\n Stop accessing checkAsUser via RTE in some cases\n\nWhich does this in a couple places in selfuncs.c:\n\n if (!vardata->acl_ok &&\n root->append_rel_array != NULL)\n { \n AppendRelInfo *appinfo;\n Index varno = index->rel->relid;\n\n appinfo = root->append_rel_array[varno];\n while (appinfo &&\n planner_rt_fetch(appinfo->parent_relid,\n root)->rtekind == RTE_RELATION)\n { \n varno = appinfo->parent_relid;\n appinfo = root->append_rel_array[varno];\n }\n if (varno != index->rel->relid)\n { \n /* Repeat access check on this rel */\n rte = planner_rt_fetch(varno, root);\n Assert(rte->rtekind == RTE_RELATION);\n\n- userid = rte->checkAsUser ? 
rte->checkAsUser : GetUserId();\n+ userid = OidIsValid(onerel->userid) ?\n+ onerel->userid : GetUserId();\n\n vardata->acl_ok =\n rte->securityQuals == NIL &&\n (pg_class_aclcheck(rte->relid,\n userid,\n ACL_SELECT) == ACLCHECK_OK);\n }\n }\n\n\nThe original code rechecks rte->checkAsUser with the rte of the parent\nrel. The patch changed to access onerel instead, but that's not updated\nafter looping to find the parent.\n\nIs that okay ? It doesn't seem intentional, since \"userid\" is still\nbeing recomputed, but based on onerel, which hasn't changed. The\noriginal intent (since 553d2ec27) is to recheck the parent's\n\"checkAsUser\". \n\nIt seems like this would matter for partitioned tables, when the\npartition isn't readable, but its parent is, and accessed via a view\nowned by another user?\n\n-- \nJustin\n\n\n", "msg_date": "Sat, 10 Dec 2022 14:17:53 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "Hi,\n\nOn Sun, Dec 11, 2022 at 5:17 AM Justin Pryzby <pryzby@telsasoft.com> wrote:\n> On Tue, Nov 29, 2022 at 10:37:56PM +0900, Amit Langote wrote:\n> > 0002 contains changes that has to do with changing how we access\n> > checkAsUser in some foreign table planning/execution code sites.\n> > Thought it might be better to describe it separately too.\n>\n> This was committed as 599b33b94:\n> Stop accessing checkAsUser via RTE in some cases\n>\n> Which does this in a couple places in selfuncs.c:\n>\n> if (!vardata->acl_ok &&\n> root->append_rel_array != NULL)\n> {\n> AppendRelInfo *appinfo;\n> Index varno = index->rel->relid;\n>\n> appinfo = root->append_rel_array[varno];\n> while (appinfo &&\n> planner_rt_fetch(appinfo->parent_relid,\n> root)->rtekind == RTE_RELATION)\n> {\n> varno = appinfo->parent_relid;\n> appinfo = root->append_rel_array[varno];\n> }\n> if (varno != index->rel->relid)\n> {\n> /* Repeat access check on this rel 
*/\n> rte = planner_rt_fetch(varno, root);\n> Assert(rte->rtekind == RTE_RELATION);\n>\n> - userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();\n> + userid = OidIsValid(onerel->userid) ?\n> + onerel->userid : GetUserId();\n>\n> vardata->acl_ok =\n> rte->securityQuals == NIL &&\n> (pg_class_aclcheck(rte->relid,\n> userid,\n> ACL_SELECT) == ACLCHECK_OK);\n> }\n> }\n>\n>\n> The original code rechecks rte->checkAsUser with the rte of the parent\n> rel. The patch changed to access onerel instead, but that's not updated\n> after looping to find the parent.\n>\n> Is that okay ? It doesn't seem intentional, since \"userid\" is still\n> being recomputed, but based on onerel, which hasn't changed. The\n> original intent (since 553d2ec27) is to recheck the parent's\n> \"checkAsUser\".\n>\n> It seems like this would matter for partitioned tables, when the\n> partition isn't readable, but its parent is, and accessed via a view\n> owned by another user?\n\nThanks for pointing this out.\n\nI think these blocks which are rewriting userid to basically the same\nvalue should have been removed from these sites as part of 599b33b94.\nEven before that commit, the checkAsUser value should have been the\nsame in the RTE of both the child relation passed to these functions\nand that of the root parent that's looked up by looping. 
That's\nbecause expand_single_inheritance_child(), which adds child RTEs,\ncopies the parent RTE's checkAsUser, that is, as of commit 599b33b94.\nA later commit a61b1f74823c has removed the checkAsUser field from\nRangeTblEntry.\n\nMoreover, 599b33b94 adds some code in build_simple_rel() to set a\ngiven rel's userid value by copying it from the parent rel, such that\nthe userid value would be the same in all relations in a given\ninheritance tree.\n\nI've attached 0001 to remove those extraneous code blocks and add a\ncomment mentioning that userid need not be recomputed.\n\nWhile staring at the build_simple_rel() bit mentioned above, I\nrealized that this code fails to set userid correctly in the\ninheritance parent rels that are child relations of subquery parent\nrelations, such as UNION ALL subqueries. In that case, instead of\ncopying the userid (= 0) of the parent rel, the child should look up\nits own RTEPermissionInfo, which should be there, and use the\ncheckAsUser value from there. I've attached 0002 to fix this hole. I\nam not sure whether there's a way to add a test case for this in the\ncore suite.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Sun, 11 Dec 2022 18:25:48 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On Sun, Dec 11, 2022 at 06:25:48PM +0900, Amit Langote wrote:\n> On Sun, Dec 11, 2022 at 5:17 AM Justin Pryzby <pryzby@telsasoft.com> wrote:\n> > The original code rechecks rte->checkAsUser with the rte of the parent\n> > rel. The patch changed to access onerel instead, but that's not updated\n> > after looping to find the parent.\n> >\n> > Is that okay ? It doesn't seem intentional, since \"userid\" is still\n> > being recomputed, but based on onerel, which hasn't changed. 
The\n> > original intent (since 553d2ec27) is to recheck the parent's\n> > \"checkAsUser\".\n> >\n> > It seems like this would matter for partitioned tables, when the\n> > partition isn't readable, but its parent is, and accessed via a view\n> > owned by another user?\n> \n> Thanks for pointing this out.\n> \n> I think these blocks which are rewriting userid to basically the same\n> value should have been removed from these sites as part of 599b33b94.\n\nI thought maybe; thanks for checking.\n\nLittle nitpicks:\n\n001:\nFine to use the same userid as it's same in all\n=> the same\n\n002:\ngive that it's a subquery rel.\n=> given\n\n-- \nJustin\n\n\n", "msg_date": "Sun, 11 Dec 2022 08:21:58 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On Sun, Dec 11, 2022 at 6:25 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> I've attached 0001 to remove those extraneous code blocks and add a\n> comment mentioning that userid need not be recomputed.\n>\n> While staring at the build_simple_rel() bit mentioned above, I\n> realized that this code fails to set userid correctly in the\n> inheritance parent rels that are child relations of subquery parent\n> relations, such as UNION ALL subqueries. In that case, instead of\n> copying the userid (= 0) of the parent rel, the child should look up\n> its own RTEPermissionInfo, which should be there, and use the\n> checkAsUser value from there. I've attached 0002 to fix this hole. 
I\n> am not sure whether there's a way to add a test case for this in the\n> core suite.\n\nAh, I realized we could just expand the test added by 553d2ec27 with a\nwrapper view (to test checkAsUser functionality) and a UNION ALL query\nover the view (to test this change).\n\nI've done that in the attached updated patch, in which I've also\naddressed Justin's comments.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 12 Dec 2022 15:23:22 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "Alvaro could you comment on this ?\n\n\n", "msg_date": "Wed, 21 Dec 2022 13:44:11 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On Wed, Dec 21, 2022 at 01:44:11PM -0600, Justin Pryzby wrote:\n> Alvaro could you comment on this ?\n\nI added here so it's not forgotten.\nhttps://commitfest.postgresql.org/42/4107/\n\n\n", "msg_date": "Thu, 5 Jan 2023 15:12:20 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "Justin Pryzby <pryzby@telsasoft.com> writes:\n> On Wed, Dec 21, 2022 at 01:44:11PM -0600, Justin Pryzby wrote:\n>> Alvaro could you comment on this ?\n\nI believe Alvaro's on vacation for a few days more.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 05 Jan 2023 16:25:41 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On 2022-Dec-12, Amit Langote wrote:\n\n> On Sun, Dec 11, 2022 at 6:25 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > I've attached 0001 to remove those extraneous code blocks and add 
a\n> > comment mentioning that userid need not be recomputed.\n> >\n> > While staring at the build_simple_rel() bit mentioned above, I\n> > realized that this code fails to set userid correctly in the\n> > inheritance parent rels that are child relations of subquery parent\n> > relations, such as UNION ALL subqueries. In that case, instead of\n> > copying the userid (= 0) of the parent rel, the child should look up\n> > its own RTEPermissionInfo, which should be there, and use the\n> > checkAsUser value from there. I've attached 0002 to fix this hole. I\n> > am not sure whether there's a way to add a test case for this in the\n> > core suite.\n> \n> Ah, I realized we could just expand the test added by 553d2ec27 with a\n> wrapper view (to test checkAsUser functionality) and a UNION ALL query\n> over the view (to test this change).\n\nHmm, but if I run this test without the code change in 0002, the test\nalso passes (to wit: the plan still has two hash joins), so I understand\nthat either you're testing something that's not changed by the patch,\nor the test case itself isn't really what you wanted.\n\nAs for 0001, it seems simpler to me to put the 'userid' variable in the\nsame scope as 'onerel', and then compute it just once and don't bother\nwith it at all afterwards, as in the attached.\n\n-- \nÁlvaro Herrera 48°01'N 7°57'E — https://www.EnterpriseDB.com/\nAl principio era UNIX, y UNIX habló y dijo: \"Hello world\\n\".\nNo dijo \"Hello New Jersey\\n\", ni \"Hello USA\\n\".", "msg_date": "Tue, 17 Jan 2023 11:26:54 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On Tue, Jan 17, 2023 at 7:33 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> On 2022-Dec-12, Amit Langote wrote:\n> > On Sun, Dec 11, 2022 at 6:25 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > > I've attached 0001 to remove those extraneous code blocks 
and add a\n> > > comment mentioning that userid need not be recomputed.\n> > >\n> > > While staring at the build_simple_rel() bit mentioned above, I\n> > > realized that this code fails to set userid correctly in the\n> > > inheritance parent rels that are child relations of subquery parent\n> > > relations, such as UNION ALL subqueries. In that case, instead of\n> > > copying the userid (= 0) of the parent rel, the child should look up\n> > > its own RTEPermissionInfo, which should be there, and use the\n> > > checkAsUser value from there. I've attached 0002 to fix this hole. I\n> > > am not sure whether there's a way to add a test case for this in the\n> > > core suite.\n> >\n> > Ah, I realized we could just expand the test added by 553d2ec27 with a\n> > wrapper view (to test checkAsUser functionality) and a UNION ALL query\n> > over the view (to test this change).\n>\n> Hmm, but if I run this test without the code change in 0002, the test\n> also passes (to wit: the plan still has two hash joins), so I understand\n> that either you're testing something that's not changed by the patch,\n> or the test case itself isn't really what you wanted.\n\nYeah, the test case is bogus. :-(.\n\nIt seems that, with the test as written, it's not the partitioned\ntable referenced in the view's query that becomes a child of the UNION\nALL parent subquery, but the subquery itself. The bug being fixed in\n0002 doesn't affect the planning of this query at all, because child\nsubquery is planned independently of the main query involving UNION\nALL because of it being unable to be pushed up into the latter. We\nwant the partitioned table referenced in the child subquery to become\na child of the UNION ALL parent subquery for the bug to be relevant.\n\nI tried rewriting the test such that the view's subquery does get\npulled up such that the partitioned table becomes a child of the UNION\nALL subquery. 
By attaching a debugger, I do see the bug affecting the\nplanning of this query, but still not in a way that changes the plan.\nI will keep trying but in the meantime I'm attaching 0001 to show the\nrewritten query and the plan.\n\n> As for 0001, it seems simpler to me to put the 'userid' variable in the\n> same scope as 'onerel', and then compute it just once and don't bother\n> with it at all afterwards, as in the attached.\n\nThat sounds better. Attached as 0002.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Thu, 19 Jan 2023 20:16:16 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On 2023-Jan-19, Amit Langote wrote:\n\n> It seems that, with the test as written, it's not the partitioned\n> table referenced in the view's query that becomes a child of the UNION\n> ALL parent subquery, but the subquery itself. The bug being fixed in\n> 0002 doesn't affect the planning of this query at all, because child\n> subquery is planned independently of the main query involving UNION\n> ALL because of it being unable to be pushed up into the latter. We\n> want the partitioned table referenced in the child subquery to become\n> a child of the UNION ALL parent subquery for the bug to be relevant.\n> \n> I tried rewriting the test such that the view's subquery does get\n> pulled up such that the partitioned table becomes a child of the UNION\n> ALL subquery. By attaching a debugger, I do see the bug affecting the\n> planning of this query, but still not in a way that changes the plan.\n> I will keep trying but in the meantime I'm attaching 0001 to show the\n> rewritten query and the plan.\n\nThanks for spending time tracking down a test case. 
I'll try to have a\nlook later today.\n\n> > As for 0001, it seems simpler to me to put the 'userid' variable in the\n> > same scope as 'onerel', and then compute it just once and don't bother\n> > with it at all afterwards, as in the attached.\n> \n> That sounds better. Attached as 0002.\n\nPushed this one, thank you.\n\n-- \nÁlvaro Herrera 48°01'N 7°57'E — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Thu, 19 Jan 2023 13:25:20 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On Tue, Nov 29, 2022 at 10:37:56PM +0900, Amit Langote wrote:\n> 0002 contains changes that has to do with changing how we access\n> checkAsUser in some foreign table planning/execution code sites.\n> Thought it might be better to describe it separately too.\n\nThis was committed as 599b33b94:\n Stop accessing checkAsUser via RTE in some cases\n\nThat seems to add various elog()s which are hit frequently by sqlsmith:\n\npostgres=# select from\n(select transaction\nfrom pg_prepared_xacts\nright join pg_available_extensions\non false limit 0) where false;\nERROR: permission info at index 2 (with relid=1262) does not match provided RTE (with relid=12081)\n\npostgres=# select from (select confl_tablespace\nfrom pg_stat_database_conflicts\nwhere datname <> (select 'af')\nlimit 1) where false;\nERROR: invalid perminfoindex 1 in RTE with relid 12271\n\n\n\n", "msg_date": "Sun, 12 Feb 2023 17:37:11 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (sqlsmith)" }, { "msg_contents": "On Mon, Feb 13, 2023 at 5:07 Justin Pryzby <pryzby@telsasoft.com> wrote:\n\n> On Tue, Nov 29, 2022 at 10:37:56PM +0900, Amit Langote wrote:\n> > 0002 contains changes that has to do with changing how we access\n> > checkAsUser in some foreign table planning/execution code sites.\n> 
> Thought it might be better to describe it separately too.\n>\n> This was committed as 599b33b94:\n> Stop accessing checkAsUser via RTE in some cases\n>\n> That seems to add various elog()s which are hit frequently by sqlsmith:\n>\n> postgres=# select from\n> (select transaction\n> from pg_prepared_xacts\n> right join pg_available_extensions\n> on false limit 0) where false;\n> ERROR: permission info at index 2 (with relid=1262) does not match\n> provided RTE (with relid=12081)\n>\n> postgres=# select from (select confl_tablespace\n> from pg_stat_database_conflicts\n> where datname <> (select 'af')\n> limit 1) where false;\n> ERROR: invalid perminfoindex 1 in RTE with relid 12271\n\n\nThanks for the report. I’ll take a look once I’m back at a computer in a\nfew days.\n\n> --\nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\nOn Mon, Feb 13, 2023 at 5:07 Justin Pryzby <pryzby@telsasoft.com> wrote:On Tue, Nov 29, 2022 at 10:37:56PM +0900, Amit Langote wrote:\n> 0002 contains changes that has to do with changing how we access\n> checkAsUser in some foreign table planning/execution code sites.\n> Thought it might be better to describe it separately too.\n\nThis was committed as 599b33b94:\n    Stop accessing checkAsUser via RTE in some cases\n\nThat seems to add various elog()s which are hit frequently by sqlsmith:\n\npostgres=# select from\n(select transaction\nfrom pg_prepared_xacts\nright join pg_available_extensions\non false limit 0) where false;\nERROR:  permission info at index 2 (with relid=1262) does not match provided RTE (with relid=12081)\n\npostgres=# select from (select confl_tablespace\nfrom pg_stat_database_conflicts\nwhere datname <> (select 'af')\nlimit 1) where false;\nERROR:  invalid perminfoindex 1 in RTE with relid 12271Thanks for the report.  
I’ll take a look once I’m back at a computer in a few days.-- Thanks, Amit LangoteEDB: http://www.enterprisedb.com", "msg_date": "Mon, 13 Feb 2023 13:44:44 +0530", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (sqlsmith)" }, { "msg_contents": "Amit Langote <amitlangote09@gmail.com> writes:\n> On Mon, Feb 13, 2023 at 5:07 Justin Pryzby <pryzby@telsasoft.com> wrote:\n>> That seems to add various elog()s which are hit frequently by sqlsmith:\n\n> Thanks for the report. I’ll take a look once I’m back at a computer in a\n> few days.\n\nLooks like we already have a diagnosis and fix [1]. I'll get that\npushed.\n\n\t\t\tregards, tom lane\n\n[1] https://www.postgresql.org/message-id/CAHewXNnnNySD_YcKNuFpQDV2gxWA7_YLWqHmYVcyoOYxn8kY2A%40mail.gmail.com\n\n\n", "msg_date": "Mon, 13 Feb 2023 12:01:38 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (sqlsmith)" }, { "msg_contents": "On Mon, Feb 13, 2023 at 22:31 Tom Lane <tgl@sss.pgh.pa.us> wrote:\n\n> Amit Langote <amitlangote09@gmail.com> writes:\n> > On Mon, Feb 13, 2023 at 5:07 Justin Pryzby <pryzby@telsasoft.com> wrote:\n> >> That seems to add various elog()s which are hit frequently by sqlsmith:\n>\n> > Thanks for the report. I’ll take a look once I’m back at a computer in a\n> > few days.\n>\n> Looks like we already have a diagnosis and fix [1]. 
I'll get that\n> pushed.\n>\n> regards, tom lane\n>\n> [1]\n> https://www.postgresql.org/message-id/CAHewXNnnNySD_YcKNuFpQDV2gxWA7_YLWqHmYVcyoOYxn8kY2A%40mail.gmail.com\n\n\nOh, thanks a lot.\n\n>\n> <https://www.postgresql.org/message-id/CAHewXNnnNySD_YcKNuFpQDV2gxWA7_YLWqHmYVcyoOYxn8kY2A%40mail.gmail.com>\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\nOn Mon, Feb 13, 2023 at 22:31 Tom Lane <tgl@sss.pgh.pa.us> wrote:Amit Langote <amitlangote09@gmail.com> writes:\n> On Mon, Feb 13, 2023 at 5:07 Justin Pryzby <pryzby@telsasoft.com> wrote:\n>> That seems to add various elog()s which are hit frequently by sqlsmith:\n\n> Thanks for the report.  I’ll take a look once I’m back at a computer in a\n> few days.\n\nLooks like we already have a diagnosis and fix [1].  I'll get that\npushed.\n\n                        regards, tom lane\n\n[1] https://www.postgresql.org/message-id/CAHewXNnnNySD_YcKNuFpQDV2gxWA7_YLWqHmYVcyoOYxn8kY2A%40mail.gmail.comOh, thanks a lot.-- Thanks, Amit LangoteEDB: http://www.enterprisedb.com", "msg_date": "Tue, 14 Feb 2023 11:14:59 +0530", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (sqlsmith)" }, { "msg_contents": "On 2022-Dec-11, Amit Langote wrote:\n\n> While staring at the build_simple_rel() bit mentioned above, I\n> realized that this code fails to set userid correctly in the\n> inheritance parent rels that are child relations of subquery parent\n> relations, such as UNION ALL subqueries. In that case, instead of\n> copying the userid (= 0) of the parent rel, the child should look up\n> its own RTEPermissionInfo, which should be there, and use the\n> checkAsUser value from there. I've attached 0002 to fix this hole. 
I\n> am not sure whether there's a way to add a test case for this in the\n> core suite.\n\nI gave this a look and I thought it was clearer to have the new\ncondition depend on rel->reloptkind instead parent or no.\n\nI tried a few things for a new test case, but I was unable to find\nanything useful. Maybe an intermediate view, I thought; no dice.\nMaybe one with a security barrier would do? Anyway, for now I just kept\nwhat you added in v2.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/", "msg_date": "Fri, 17 Feb 2023 13:02:46 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On 2023-Feb-17, Alvaro Herrera wrote:\n\n> I tried a few things for a new test case, but I was unable to find\n> anything useful. Maybe an intermediate view, I thought; no dice.\n> Maybe one with a security barrier would do? Anyway, for now I just kept\n> what you added in v2.\n\nSorry, I failed to keep count of the patch version correctly. 
The test\ncase here is what you sent in v3 [1], and consequently the patch I just\nattached should have been labelled v4.\n\n[1] https://postgr.es/m/CA+HiwqF6ricH7HFCkyrK72c=KN-PRkdncxdLmU_mEQx=DRAkJA@mail.gmail.com\n\n-- \nÁlvaro Herrera 48°01'N 7°57'E — https://www.EnterpriseDB.com/\n\"La vida es para el que se aventura\"\n\n\n", "msg_date": "Fri, 17 Feb 2023 13:05:58 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On Fri, Feb 17, 2023 at 9:02 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> On 2022-Dec-11, Amit Langote wrote:\n> > While staring at the build_simple_rel() bit mentioned above, I\n> > realized that this code fails to set userid correctly in the\n> > inheritance parent rels that are child relations of subquery parent\n> > relations, such as UNION ALL subqueries. In that case, instead of\n> > copying the userid (= 0) of the parent rel, the child should look up\n> > its own RTEPermissionInfo, which should be there, and use the\n> > checkAsUser value from there. I've attached 0002 to fix this hole. I\n> > am not sure whether there's a way to add a test case for this in the\n> > core suite.\n>\n> I gave this a look and I thought it was clearer to have the new\n> condition depend on rel->reloptkind instead parent or no.\n\nThanks for looking into this again. I agree the condition with\nreloptkind might be better.\n\n> I tried a few things for a new test case, but I was unable to find\n> anything useful. Maybe an intermediate view, I thought; no dice.\n> Maybe one with a security barrier would do? 
Anyway, for now I just kept\n> what you added in v2.\n\nHmm, I'm fine with leaving the test case out if it doesn't really test\nthe code we're changing, as you also pointed out?\n\nOne more thing we could try is come up with a postgres_fdw test case,\nbecause it uses the RelOptInfo.userid value for remote-costs-based\npath size estimation. But adding a test case to contrib module's\nsuite test a core planner change might seem strange, ;-).\n\nAttaching v4 without the test case.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 20 Feb 2023 16:56:22 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On 2023-Feb-20, Amit Langote wrote:\n\n> On Fri, Feb 17, 2023 at 9:02 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n\n> > I tried a few things for a new test case, but I was unable to find\n> > anything useful. Maybe an intermediate view, I thought; no dice.\n> > Maybe one with a security barrier would do? Anyway, for now I just kept\n> > what you added in v2.\n> \n> Hmm, I'm fine with leaving the test case out if it doesn't really test\n> the code we're changing, as you also pointed out?\n\nYeah, pushed like that.\n\n> One more thing we could try is come up with a postgres_fdw test case,\n> because it uses the RelOptInfo.userid value for remote-costs-based\n> path size estimation. But adding a test case to contrib module's\n> suite test a core planner change might seem strange, ;-).\n\nMaybe. Perhaps adding it in a separate file there is okay?\n\n-- \nÁlvaro Herrera 48°01'N 7°57'E — https://www.EnterpriseDB.com/\n\"Small aircraft do not crash frequently ... 
usually only once!\"\n (ponder, http://thedailywtf.com/)\n\n\n", "msg_date": "Mon, 20 Feb 2023 16:19:13 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> On 2023-Feb-20, Amit Langote wrote:\n>> One more thing we could try is come up with a postgres_fdw test case,\n>> because it uses the RelOptInfo.userid value for remote-costs-based\n>> path size estimation. But adding a test case to contrib module's\n>> suite test a core planner change might seem strange, ;-).\n\n> Maybe. Perhaps adding it in a separate file there is okay?\n\nThere is plenty of stuff in contrib module tests that is really\nthere to test core-code behavior. (You could indeed argue that\n*all* of contrib is there for that purpose.) If it's not\nconvenient to test something without an extension, just do it\nand don't sweat about that.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 20 Feb 2023 10:40:10 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On Tue, Feb 21, 2023 at 12:40 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> > On 2023-Feb-20, Amit Langote wrote:\n> >> One more thing we could try is come up with a postgres_fdw test case,\n> >> because it uses the RelOptInfo.userid value for remote-costs-based\n> >> path size estimation. But adding a test case to contrib module's\n> >> suite test a core planner change might seem strange, ;-).\n>\n> > Maybe. Perhaps adding it in a separate file there is okay?\n>\n> There is plenty of stuff in contrib module tests that is really\n> there to test core-code behavior. (You could indeed argue that\n> *all* of contrib is there for that purpose.) 
If it's not\n> convenient to test something without an extension, just do it\n> and don't sweat about that.\n\nOK. Attached adds a test case to postgres_fdw's suite. You can see\nthat it fails without a316a3bc.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Tue, 21 Feb 2023 16:12:14 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "Hi,\n\nOn Tue, Feb 21, 2023 at 4:12 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Tue, Feb 21, 2023 at 12:40 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> > > On 2023-Feb-20, Amit Langote wrote:\n> > >> One more thing we could try is come up with a postgres_fdw test case,\n> > >> because it uses the RelOptInfo.userid value for remote-costs-based\n> > >> path size estimation. But adding a test case to contrib module's\n> > >> suite test a core planner change might seem strange, ;-).\n> >\n> > > Maybe. Perhaps adding it in a separate file there is okay?\n> >\n> > There is plenty of stuff in contrib module tests that is really\n> > there to test core-code behavior. (You could indeed argue that\n> > *all* of contrib is there for that purpose.) If it's not\n> > convenient to test something without an extension, just do it\n> > and don't sweat about that.\n>\n> OK. Attached adds a test case to postgres_fdw's suite. You can see\n> that it fails without a316a3bc.\n\nNoticed that there's an RfC entry for this in the next CF. 
Here's an\nupdated version of the patch where I updated the comments a bit and\nthe commit message.\n\nI'm thinking of pushing this on Friday barring objections.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 28 Jun 2023 16:30:54 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" }, { "msg_contents": "On Wed, Jun 28, 2023 at 4:30 PM Amit Langote <amitlangote09@gmail.com> wrote:\n>\n> Hi,\n>\n> On Tue, Feb 21, 2023 at 4:12 PM Amit Langote <amitlangote09@gmail.com> wrote:\n> > On Tue, Feb 21, 2023 at 12:40 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > > Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> > > > On 2023-Feb-20, Amit Langote wrote:\n> > > >> One more thing we could try is come up with a postgres_fdw test case,\n> > > >> because it uses the RelOptInfo.userid value for remote-costs-based\n> > > >> path size estimation. But adding a test case to contrib module's\n> > > >> suite test a core planner change might seem strange, ;-).\n> > >\n> > > > Maybe. Perhaps adding it in a separate file there is okay?\n> > >\n> > > There is plenty of stuff in contrib module tests that is really\n> > > there to test core-code behavior. (You could indeed argue that\n> > > *all* of contrib is there for that purpose.) If it's not\n> > > convenient to test something without an extension, just do it\n> > > and don't sweat about that.\n> >\n> > OK. Attached adds a test case to postgres_fdw's suite. You can see\n> > that it fails without a316a3bc.\n>\n> Noticed that there's an RfC entry for this in the next CF. 
Here's an\n> updated version of the patch where I updated the comments a bit and\n> the commit message.\n>\n> I'm thinking of pushing this on Friday barring objections.\n\nSeeing none, I've pushed this to HEAD and 16.\n\nMarking the CF entry as committed.\n\n-- \nThanks, Amit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 30 Jun 2023 15:54:38 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ExecRTCheckPerms() and many prunable partitions (checkAsUser)" } ]
[ { "msg_contents": "Back when working on 959d00e9d, to allow ordered partition scans for\nLIST and RANGE partitioned tables, I mentioned [1] that if we had a\nfield that recorded a Bitmapset of the non-pruned partitions, we could\nuse that to do a more thorough check to see if ordered scans are\npossible.\n\nAt the moment these ordered scans are possible if the order by matches\nthe partition key and, for LIST partitioned tables we must also ensure\nthat we don't have something like partition1 FOR VALUES IN(1,3) and\npartition2 FOR VALUES(2,4). Here we can't scan partition1 first before\nwe'd get 3s before the 2s that are in partition2. Since we don't\nrecord the list of partitions that survived pruning, I just made that\ncheck to ensure all partitions only allow a single value to be stored.\nThat means the optimisation is not done in cases where it's possible\nto do it.\n\nTo make this work, as I mentioned in [1], we really need a live_parts\nfield to track which partitions survived pruning. If we have that\nthen we can ensure that just those partitions have no instances where\na lower value could appear in a later partition.\n\nIn the attached patch, I've only added the live_parts field and made\nuse of it in a few locations where the knowledge is useful as an\noptimisation. Right now apply_scanjoin_target_to_paths() appears in\nprofiles when planning a query to a partitioned table with many\npartitions and just 1 or a few survive pruning. The reason for this is\nsimple; looping over a large almost entirely empty array is just slow\nand using the Bitmapset as a sort of index into the interesting\nelements of that array speeds it up, quite a bit.\n\nSince we've already put quite a bit of effort into making that fast,\nthen I think it might be worth adding this field even if it was just\nfor that purpose.\n\nWith 10k empty hash partitions and 1 random one surviving partition\npruning, I see:\n\nMaster:\n 18.13% postgres postgres [.] 
apply_scanjoin_target_to_paths\n 3.66% postgres postgres [.] AllocSetAlloc\n 2.05% postgres postgres [.] hash_search_with_hash_value\n 1.95% postgres libc-2.33.so [.] __memset_avx2_unaligned_erms\n 1.88% postgres postgres [.] SearchCatCacheInternal\n 1.55% postgres postgres [.] base_yyparse\n 0.74% postgres postgres [.] get_relation_info\n 0.68% postgres [kernel.kallsyms] [k] __d_lookup_rcu\n 0.61% postgres postgres [.] palloc\n\nPatched:\n 3.72% postgres postgres [.] AllocSetAlloc\n 2.30% postgres postgres [.] hash_search_with_hash_value\n 2.22% postgres postgres [.] SearchCatCacheInternal\n 2.02% postgres libc-2.33.so [.] __memset_avx2_unaligned_erms\n 1.88% postgres postgres [.] base_yyparse\n 1.08% postgres postgres [.] palloc\n 0.92% postgres [kernel.kallsyms] [k] __d_lookup_rcu\n\n$ cat setup.sql\ncreate table hp (a int primary key, b int not null) partition by hash(a);\nselect 'create table hp'||x|| ' partition of hp for values with\n(modulus 10000, remainder '||x||');' from generate_series(0,9999) x;\n\\gexec\n\n$ cat select.sql\n\\set p random(1,2000000)\nselect * from hp where a = :p\n\nmaster\n\n$ pgbench -n -f select.sql -c 1 -j 1 -T 60 postgres\ntps = 2608.704218 (without initial connection time)\ntps = 2607.641247 (without initial connection time)\ntps = 2583.017011 (without initial connection time)\n\npatched\n\n$ pgbench -n -f select.sql -c 1 -j 1 -T 60 postgres\ntps = 2715.993495 (without initial connection time)\ntps = 2701.527640 (without initial connection time)\ntps = 2707.343009 (without initial connection time)\n\nDoes anyone have any thoughts about if this is worth a new field in RelOptInfo?\n\nDavid\n\n[1] https://www.postgresql.org/message-id/CAKJS1f9W7sg1sb3SXiTQUovs%3DwDMrHATXv68F5dSbe5fuHH%2BiQ%40mail.gmail.com", "msg_date": "Thu, 1 Jul 2021 01:59:28 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Wed, Jun 30, 2021 at 
10:59 PM David Rowley <dgrowleyml@gmail.com> wrote:\n> Back when working on 959d00e9d, to allow ordered partition scans for\n> LIST and RANGE partitioned tables, I mentioned [1] that if we had a\n> field that recorded a Bitmapset of the non-pruned partitions, we could\n> use that to do a more thorough check to see if ordered scans are\n> possible.\n>\n> At the moment these ordered scans are possible if the order by matches\n> the partition key and, for LIST partitioned tables we must also ensure\n> that we don't have something like partition1 FOR VALUES IN(1,3) and\n> partition2 FOR VALUES(2,4). Here we can't scan partition1 first before\n> we'd get 3s before the 2s that are in partition2. Since we don't\n> record the list of partitions that survived pruning, I just made that\n> check to ensure all partitions only allow a single value to be stored.\n> That means the optimisation is not done in cases where it's possible\n> to do it.\n>\n> To make this work, as I mentioned in [1], we really need a live_parts\n> field to track which partitions survived pruning. If we have that\n> then we can ensure that just those partitions have no instances where\n> a lower value could appear in a later partition.\n>\n> In the attached patch, I've only added the live_parts field and made\n> use of it in a few locations where the knowledge is useful as an\n> optimisation. Right now apply_scanjoin_target_to_paths() appears in\n> profiles when planning a query to a partitioned table with many\n> partitions and just 1 or a few survive pruning. 
The reason for this is\n> simple; looping over a large almost entirely empty array is just slow\n> and using the Bitmapset as a sort of index into the interesting\n> elements of that array speeds it up, quite a bit.\n>\n> Since we've already put quite a bit of effort into making that fast,\n> then I think it might be worth adding this field even if it was just\n> for that purpose.\n>\n> With 10k empty hash partitions and 1 random one surviving partition\n> pruning, I see:\n>\n> Master:\n> 18.13% postgres postgres [.] apply_scanjoin_target_to_paths\n> 3.66% postgres postgres [.] AllocSetAlloc\n> 2.05% postgres postgres [.] hash_search_with_hash_value\n> 1.95% postgres libc-2.33.so [.] __memset_avx2_unaligned_erms\n> 1.88% postgres postgres [.] SearchCatCacheInternal\n> 1.55% postgres postgres [.] base_yyparse\n> 0.74% postgres postgres [.] get_relation_info\n> 0.68% postgres [kernel.kallsyms] [k] __d_lookup_rcu\n> 0.61% postgres postgres [.] palloc\n>\n> Patched:\n> 3.72% postgres postgres [.] AllocSetAlloc\n> 2.30% postgres postgres [.] hash_search_with_hash_value\n> 2.22% postgres postgres [.] SearchCatCacheInternal\n> 2.02% postgres libc-2.33.so [.] __memset_avx2_unaligned_erms\n> 1.88% postgres postgres [.] base_yyparse\n> 1.08% postgres postgres [.] 
palloc\n> 0.92% postgres [kernel.kallsyms] [k] __d_lookup_rcu\n>\n> $ cat setup.sql\n> create table hp (a int primary key, b int not null) partition by hash(a);\n> select 'create table hp'||x|| ' partition of hp for values with\n> (modulus 10000, remainder '||x||');' from generate_series(0,9999) x;\n> \\gexec\n>\n> $ cat select.sql\n> \\set p random(1,2000000)\n> select * from hp where a = :p\n>\n> master\n>\n> $ pgbench -n -f select.sql -c 1 -j 1 -T 60 postgres\n> tps = 2608.704218 (without initial connection time)\n> tps = 2607.641247 (without initial connection time)\n> tps = 2583.017011 (without initial connection time)\n>\n> patched\n>\n> $ pgbench -n -f select.sql -c 1 -j 1 -T 60 postgres\n> tps = 2715.993495 (without initial connection time)\n> tps = 2701.527640 (without initial connection time)\n> tps = 2707.343009 (without initial connection time)\n>\n> Does anyone have any thoughts about if this is worth a new field in RelOptInfo?\n\n+1 from me.\n\nI had proposed adding a live_parts bitmapset back in [1] (v12 work on\nspeeding up planning with partition) to address the\napply_scanjoin_target_to_paths() inefficiency among other things,\nthough Tom seemed to think that it wouldn't be worthwhile if only for\nthat purpose. 
He'd suggested that we fix things elsewhere such that\nthat function is not needed in the first place [2], something I keep\nthinking about in between doing other things, but never sit down to\nactually write a patch.\n\nGiven that you're proposing more uses for live_parts, maybe he'd be\nopen to the idea.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n[1] https://www.postgresql.org/message-id/3f280722-46f2-c2a4-4c19-2cfa28c6c1cd%40lab.ntt.co.jp\n[2] https://www.postgresql.org/message-id/3529.1554051867%40sss.pgh.pa.us\n\n\n", "msg_date": "Thu, 1 Jul 2021 14:49:02 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Thu, 1 Jul 2021 at 17:49, Amit Langote <amitlangote09@gmail.com> wrote:\n> Given that you're proposing more uses for live_parts, maybe he'd be\n> open to the idea.\n\nJust to make sure the new field in the 0001 patch gets good enough\nuse, I've attached the patch which includes more usages of the field.\n\n0002 adds a new field named interleaved_parts to PartitionBoundInfo\nwhich is populated for LIST partitioned tables with any partitions\nwhich have interleaved values, e.g FOR VALUES IN(3,5) and another\npartition with FOR VALUES IN(4), the 3,5 partition is \"interleaved\"\naround the partition for 4.\n\nThis combined with recording \"live_parts\" in the 0001 patch allows us\nto do ordered partition scans in many more cases for LIST partitioning\nand 1 more case with RANGE partitioning.\n\ncreate table mclparted (a int) partition by list(a);\ncreate table mclparted1 partition of mclparted for values in(1);\ncreate table mclparted2 partition of mclparted for values in(2);\ncreate table mclparted3_5 partition of mclparted for values in(3,5);\ncreate table mclparted4 partition of mclparted for values in(4);\ncreate index on mclparted (a);\n\nset enable_bitmapscan=0;\nset enable_sort=0;\n\n-- ordered scan using 
Append\nexplain (costs off) select * from mclparted where a in(1,2) order by a;\n QUERY PLAN\n------------------------------------------------------------------------\n Append\n -> Index Only Scan using mclparted1_a_idx on mclparted1 mclparted_1\n Index Cond: (a = ANY ('{1,2}'::integer[]))\n -> Index Only Scan using mclparted2_a_idx on mclparted2 mclparted_2\n Index Cond: (a = ANY ('{1,2}'::integer[]))\n\n-- no ordered scan due to interleaved partition. Must use Merge Append\nexplain (costs off) select * from mclparted where a in(3,4) order by a;\n QUERY PLAN\n----------------------------------------------------------------------------\n Merge Append\n Sort Key: mclparted.a\n -> Index Only Scan using mclparted3_5_a_idx on mclparted3_5 mclparted_1\n Index Cond: (a = ANY ('{3,4}'::integer[]))\n -> Index Only Scan using mclparted4_a_idx on mclparted4 mclparted_2\n Index Cond: (a = ANY ('{3,4}'::integer[]))\n\nCurrently, this is a bit more strict than maybe it needs to be. I'm\ndisabling the optimisation if any interleaved partitions remain after\npruning, however, it would be ok to allow them providing their\ninterleaved partner(s) were pruned. I think making that work might be\na bit more costly as we'd need to track all partitions that were\ninterleaved with each interleaved partition and ensure those were all\npruned. As far as I can see that requires storing a Bitmapset per\ninterleaved partition and makes the whole thing not so cheap. I'd\nreally like to keep all this stuff cheap as possible. That's why I\nended up calculating the interleaved partitions in\ncreate_list_bounds() rather than partitions_are_ordered().\n\nThe good news is that the code in partitions_are_ordered() became even\nmore simple as a result of this change. 
We can do ordered scan simply\nwhen !bms_overlap(live_parts, boundinfo->interleaved_parts).\n\nThe additional case we can now allow for RANGE partition is that we\ncan now do ordered scan when there is a DEFAULT partition but it was\npruned. Previously we had to disable the optimisation when there was a\nDEFAULT partition as we had no idea if it was pruned or not.\n\nDavid", "msg_date": "Sat, 10 Jul 2021 03:24:33 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Sat, 10 Jul 2021 at 03:24, David Rowley <dgrowleyml@gmail.com> wrote:\n> The good news is that the code in partitions_are_ordered() became even\n> more simple as a result of this change. We can do ordered scan simply\n> when !bms_overlap(live_parts, boundinfo->interleaved_parts).\n\nI've spent a bit more time revising the 0002 patch so that we're a bit\nmore strict about when we mark a partition as interleaved. For\nexample, if the DEFAULT partition happens to be the only partition,\nthen I'm no longer classing that as interleaved as there's nothing for\nit to be interleaved with.\n\nThis also fixes up the not-so-robust check that I had to check if the\nNULL partition allowed other Datums.\n\nDavid", "msg_date": "Mon, 12 Jul 2021 14:47:24 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Mon, Jul 12, 2021 at 11:47 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> v3 patches\n\n0001 looks mostly fine, except I thought the following could be worded\nto say that the bitmap members are offsets into the part_rels array.\nTo avoid someone confusing them with RT indexes, for example.\n\n+ Bitmapset *live_parts; /* Bitmap with members to indicate which\n+ * partitions survived partition pruning. */\n\nOn 0002:\n\ninterleaved_parts idea looks clever. 
I wonder if you decided that\nit's maybe not worth setting that field in the joinrel's\nPartitionBoundInfos? For example, adding the code that you added in\ncreate_list_bounds() also in merge_list_bounds().\n\n... The definition of interleaved\n+ * is any partition that can contain multiple different values where exists at\n+ * least one other partition which could contain a value which is between the\n+ * multiple possible values in the other partition.\n\nThe sentence sounds a bit off starting at \"...where exists\". How about:\n\n\"A partition is considered interleaved if it contains multiple values\nsuch that there exists at least one other partition containing a value\nthat lies between those values [ in terms of partitioning-defined\nordering ].\"\n\nLooks fine otherwise.\n\n\n--\nAmit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 30 Jul 2021 16:10:37 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Fri, 30 Jul 2021 at 19:10, Amit Langote <amitlangote09@gmail.com> wrote:\n>\n> 0001 looks mostly fine, except I thought the following could be worded\n> to say that the bitmap members are offsets into the part_rels array.\n> To avoid someone confusing them with RT indexes, for example.\n>\n> + Bitmapset *live_parts; /* Bitmap with members to indicate which\n> + * partitions survived partition pruning. */\n\nYeah, agreed. I've adjusted that.\n\n> On 0002:\n>\n> interleaved_parts idea looks clever. I wonder if you decided that\n> it's maybe not worth setting that field in the joinrel's\n> PartitionBoundInfos? 
For example, adding the code that you added in\n> create_list_bounds() also in merge_list_bounds().\n\nCurrently generate_orderedappend_paths() only checks\npartitions_are_ordered() for base and other member rels, so setting\nthe field for join rels would be a waste of effort given that it's not\nused for anything.\n\nI've not really looked into the possibility of enabling this\noptimization for partition-wise joined rels. I know that there's a bit\nmore complexity now due to c8434d64c. I'm not really all that clear on\nwhich cases could be allowed here and which couldn't. It would require\nmore analysis and I'd say that's a different patch.\n\n> ... The definition of interleaved\n> + * is any partition that can contain multiple different values where exists at\n> + * least one other partition which could contain a value which is between the\n> + * multiple possible values in the other partition.\n>\n> The sentence sounds a bit off starting at \"...where exists\". How about:\n\nI must have spent too long writing SQL queries.\n\n> \"A partition is considered interleaved if it contains multiple values\n> such that there exists at least one other partition containing a value\n> that lies between those values [ in terms of partitioning-defined\n> ordering ].\"\n\nThat looks better. I took that with some small adjustments.\n\n> Looks fine otherwise.\n\nThanks for the review.\n\nI had another self review of these and I'm pretty happy with them. 
I'm\nquite glad to see the performance of querying a single partition of a\ntable with large numbers of partitions no longer tails off as much as\nit used to.\n\nDavid", "msg_date": "Mon, 2 Aug 2021 00:31:02 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Sun, Aug 1, 2021 at 5:31 AM David Rowley <dgrowleyml@gmail.com> wrote:\n\n> On Fri, 30 Jul 2021 at 19:10, Amit Langote <amitlangote09@gmail.com>\n> wrote:\n> >\n> > 0001 looks mostly fine, except I thought the following could be worded\n> > to say that the bitmap members are offsets into the part_rels array.\n> > To avoid someone confusing them with RT indexes, for example.\n> >\n> > + Bitmapset *live_parts; /* Bitmap with members to indicate which\n> > + * partitions survived partition\n> pruning. */\n>\n> Yeah, agreed. I've adjusted that.\n>\n> > On 0002:\n> >\n> > interleaved_parts idea looks clever. I wonder if you decided that\n> > it's maybe not worth setting that field in the joinrel's\n> > PartitionBoundInfos? For example, adding the code that you added in\n> > create_list_bounds() also in merge_list_bounds().\n>\n> Currently generate_orderedappend_paths() only checks\n> partitions_are_ordered() for base and other member rels, so setting\n> the field for join rels would be a waste of effort given that it's not\n> used for anything.\n>\n> I've not really looked into the possibility of enabling this\n> optimization for partition-wise joined rels. I know that there's a bit\n> more complexity now due to c8434d64c. I'm not really all that clear on\n> which cases could be allowed here and which couldn't. It would require\n> more analysis and I'd say that's a different patch.\n>\n> > ... 
The definition of interleaved\n> > + * is any partition that can contain multiple different values where\n> exists at\n> > + * least one other partition which could contain a value which is\n> between the\n> > + * multiple possible values in the other partition.\n> >\n> > The sentence sounds a bit off starting at \"...where exists\". How about:\n>\n> I must have spent too long writing SQL queries.\n>\n> > \"A partition is considered interleaved if it contains multiple values\n> > such that there exists at least one other partition containing a value\n> > that lies between those values [ in terms of partitioning-defined\n> > ordering ].\"\n>\n> That looks better. I took that with some small adjustments.\n>\n> > Looks fine otherwise.\n>\n> Thanks for the review.\n>\n> I had another self review of these and I'm pretty happy with them. I'm\n> quite glad to see the performance of querying a single partition of a\n> table with large numbers of partitions no longer tails off as much as\n> it used to.\n>\n> David\n>\nHi,\nSome minor comment.\n\nbq. Here we pass which partitioned\n\n partitioned -> partitions\n\nHere we look for partitions which\n+ * might be interleaved with other partitions and set the\n+ * interleaved_parts field with the partition indexes of any partitions\n+ * which may be interleaved with another partition.\n\nThe above seems a little bit repetitive. It can be shortened to remove\nrepetition.\n\nCheers\n\nOn Sun, Aug 1, 2021 at 5:31 AM David Rowley <dgrowleyml@gmail.com> wrote:On Fri, 30 Jul 2021 at 19:10, Amit Langote <amitlangote09@gmail.com> wrote:\n>\n> 0001 looks mostly fine, except I thought the following could be worded\n> to say that the bitmap members are offsets into the part_rels array.\n> To avoid someone confusing them with RT indexes, for example.\n>\n> +   Bitmapset  *live_parts;     /* Bitmap with members to indicate which\n> +                                * partitions survived partition pruning. */\n\nYeah, agreed. 
I've adjusted that.\n\n> On 0002:\n>\n> interleaved_parts idea looks clever.  I wonder if you decided that\n> it's maybe not worth setting that field in the joinrel's\n> PartitionBoundInfos?  For example, adding the code that you added in\n> create_list_bounds() also in merge_list_bounds().\n\nCurrently generate_orderedappend_paths() only checks\npartitions_are_ordered() for base and other member rels, so setting\nthe field for join rels would be a waste of effort given that it's not\nused for anything.\n\nI've not really looked into the possibility of enabling this\noptimization for partition-wise joined rels. I know that there's a bit\nmore complexity now due to c8434d64c. I'm not really all that clear on\nwhich cases could be allowed here and which couldn't. It would require\nmore analysis and I'd say that's a different patch.\n\n> ...  The definition of interleaved\n> + * is any partition that can contain multiple different values where exists at\n> + * least one other partition which could contain a value which is between the\n> + * multiple possible values in the other partition.\n>\n> The sentence sounds a bit off starting at \"...where exists\".  How about:\n\nI must have spent too long writing SQL queries.\n\n> \"A partition is considered interleaved if it contains multiple values\n> such that there exists at least one other partition containing a value\n> that lies between those values [ in terms of partitioning-defined\n> ordering ].\"\n\nThat looks better. I took that with some small adjustments.\n\n> Looks fine otherwise.\n\nThanks for the review.\n\nI had another self review of these and I'm pretty happy with them. I'm\nquite glad to see the performance of querying a single partition of a\ntable with large numbers of partitions no longer tails off as much as\nit used to.\n\nDavidHi,Some minor comment.bq. 
Here we pass which partitioned partitioned -> partitionsHere we look for partitions which+    * might be interleaved with other partitions and set the+    * interleaved_parts field with the partition indexes of any partitions+    * which may be interleaved with another partition.The above seems a little bit repetitive. It can be shortened to remove repetition.Cheers", "msg_date": "Sun, 1 Aug 2021 07:38:40 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "Thanks for having a look at this.\n\nOn Mon, 2 Aug 2021 at 02:33, Zhihong Yu <zyu@yugabyte.com> wrote:\n> Here we look for partitions which\n> + * might be interleaved with other partitions and set the\n> + * interleaved_parts field with the partition indexes of any partitions\n> + * which may be interleaved with another partition.\n>\n> The above seems a little bit repetitive. It can be shortened to remove repetition.\n\nI agree that the word \"partition\" is mentioned quite a few times. The\nonly one I can see that could be removed is the \"partition indexes\"\none. Likely the details about which bit we set can be left up to the\nstruct field comment in partbounds.h\n\nI've adjusted this to become:\n\n/*\n* Calculate interleaved partitions. Here we look for partitions which\n* might be interleaved with other partitions and set a bit in\n* interleaved_parts for any partitions which may be interleaved with\n* another partition.\n*/\n\nDavid\n\n\n", "msg_date": "Mon, 2 Aug 2021 10:20:40 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Sun, Aug 1, 2021 at 9:31 PM David Rowley <dgrowleyml@gmail.com> wrote:\n> On Fri, 30 Jul 2021 at 19:10, Amit Langote <amitlangote09@gmail.com> wrote:\n> > interleaved_parts idea looks clever. 
I wonder if you decided that\n> > it's maybe not worth setting that field in the joinrel's\n> > PartitionBoundInfos? For example, adding the code that you added in\n> > create_list_bounds() also in merge_list_bounds().\n>\n> Currently generate_orderedappend_paths() only checks\n> partitions_are_ordered() for base and other member rels, so setting\n> the field for join rels would be a waste of effort given that it's not\n> used for anything.\n>\n> I've not really looked into the possibility of enabling this\n> optimization for partition-wise joined rels. I know that there's a bit\n> more complexity now due to c8434d64c. I'm not really all that clear on\n> which cases could be allowed here and which couldn't. It would require\n> more analysis and I'd say that's a different patch.\n\nYeah, that makes sense.\n\n> > ... The definition of interleaved\n> > + * is any partition that can contain multiple different values where exists at\n> > + * least one other partition which could contain a value which is between the\n> > + * multiple possible values in the other partition.\n> >\n> > The sentence sounds a bit off starting at \"...where exists\". How about:\n>\n> I must have spent too long writing SQL queries.\n\nHah.\n\n> I had another self review of these and I'm pretty happy with them. I'm\n> quite glad to see the performance of querying a single partition of a\n> table with large numbers of partitions no longer tails off as much as\n> it used to.\n\nNice, glad to see the apply_scanjoin_target_to_paths() loop taken care of.\n\nThank you.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 2 Aug 2021 11:59:19 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Mon, 2 Aug 2021 at 00:31, David Rowley <dgrowleyml@gmail.com> wrote:\n> I had another self review of these and I'm pretty happy with them. 
I'm\n> quite glad to see the performance of querying a single partition of a\n> table with large numbers of partitions no longer tails off as much as\n> it used to.\n\nI did some profiling and benchmarking on master and with the v4 patch.\nWith a hash partitioned table containing 8192 partitions I see the\nfollowing when running a query that selects a value from a single\npartition:\n\n 19.39% postgres [.] apply_scanjoin_target_to_paths\n 5.35% postgres [.] base_yyparse\n 4.71% postgres [.] AllocSetAlloc\n 2.86% libc-2.33.so [.] __memset_avx2_unaligned_erms\n 2.17% postgres [.] SearchCatCacheInternal\n\nWith the patched version, I see:\n\n 5.89% postgres [.] AllocSetAlloc\n 3.97% postgres [.] base_yyparse\n 3.87% libc-2.33.so [.] __memset_avx2_unaligned_erms\n 2.44% postgres [.] SearchCatCacheInternal\n 1.29% postgres [.] hash_search_with_hash_value\n\nI'm getting:\nmaster: 16613 tps\npatched: 22078 tps\n\nSo there's about 32% performance improvement with this number of\npartitions. These results are not the same as my original email here\nas I've only recently discovered that I really need to pin pgbench and\nthe postgres backend to the same CPU core to get good and stable\nperformance from a single threaded pgbench job.\n\nFWIW, the next thing there on the profile the following line in\nexpand_partitioned_rtentry()\n\nrelinfo->part_rels = (RelOptInfo **) palloc0(relinfo->nparts *\nsizeof(RelOptInfo *));\n\nIf anyone has any objections to both the v4 0001 and 0002 patch, can\nthey let me know soon. I'm currently seeing no reason that they can't\ngo in.\n\nDavid\n\n\n", "msg_date": "Mon, 2 Aug 2021 20:16:11 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Mon, 2 Aug 2021 at 20:16, David Rowley <dgrowleyml@gmail.com> wrote:\n> If anyone has any objections to both the v4 0001 and 0002 patch, can\n> they let me know soon. 
I'm currently seeing no reason that they can't\n> go in.\n\nI've now pushed both of these. Thanks for the reviews.\n\nDavid\n\n\n", "msg_date": "Tue, 3 Aug 2021 12:27:38 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "Hi David,\n\nOn Mon, Aug 2, 2021 at 11:59 AM Amit Langote <amitlangote09@gmail.com> wrote:\n> On Sun, Aug 1, 2021 at 9:31 PM David Rowley <dgrowleyml@gmail.com> wrote:\n> > On Fri, 30 Jul 2021 at 19:10, Amit Langote <amitlangote09@gmail.com> wrote:\n> > > interleaved_parts idea looks clever. I wonder if you decided that\n> > > it's maybe not worth setting that field in the joinrel's\n> > > PartitionBoundInfos? For example, adding the code that you added in\n> > > create_list_bounds() also in merge_list_bounds().\n> >\n> > Currently generate_orderedappend_paths() only checks\n> > partitions_are_ordered() for base and other member rels, so setting\n> > the field for join rels would be a waste of effort given that it's not\n> > used for anything.\n> >\n> > I've not really looked into the possibility of enabling this\n> > optimization for partition-wise joined rels. I know that there's a bit\n> > more complexity now due to c8434d64c. I'm not really all that clear on\n> > which cases could be allowed here and which couldn't. It would require\n> > more analysis and I'd say that's a different patch.\n>\n> Yeah, that makes sense.\n\nRelated to the above, I noticed while looking at\nbuild_merged_partition_bounds() that db632fbca3 missed adding a line\nto that function to set interleaved_parts to NULL. Because the\nPartitionBoundInfo is only palloc'd (not palloc0'd), interleaved_parts\nof a \"merged\" bounds struct ends up pointing to garbage, so let's fix\nthat. 
Attached a patch.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com", "msg_date": "Thu, 30 Sep 2021 16:25:36 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Thu, 30 Sept 2021 at 20:25, Amit Langote <amitlangote09@gmail.com> wrote:\n> Related to the above, I noticed while looking at\n> build_merged_partition_bounds() that db632fbca3 missed adding a line\n> to that function to set interleaved_parts to NULL. Because the\n> PartitionBoundInfo is only palloc'd (not palloc0'd), interleaved_parts\n> of a \"merged\" bounds struct ends up pointing to garbage, so let's fix\n> that. Attached a patch.\n\nThanks for the patch.\n\nI think we also need to document that interleaved_parts is not set for\njoin relations, otherwise someone may in the future try to use that\nfield for an optimisation for join relations. At the moment, per\ngenerate_orderedappend_paths, we only handle IS_SIMPLE_REL type\nrelations.\n\nI've attached a patch that updates the comments to mention this.\n\nDavid", "msg_date": "Fri, 1 Oct 2021 11:07:28 +1300", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Fri, Oct 1, 2021 at 7:07 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> On Thu, 30 Sept 2021 at 20:25, Amit Langote <amitlangote09@gmail.com> wrote:\n> > Related to the above, I noticed while looking at\n> > build_merged_partition_bounds() that db632fbca3 missed adding a line\n> > to that function to set interleaved_parts to NULL. Because the\n> > PartitionBoundInfo is only palloc'd (not palloc0'd), interleaved_parts\n> > of a \"merged\" bounds struct ends up pointing to garbage, so let's fix\n> > that. 
Attached a patch.\n>\n> Thanks for the patch.\n>\n> I think we also need to document that interleaved_parts is not set for\n> join relations, otherwise someone may in the future try to use that\n> field for an optimisation for join relations. At the moment, per\n> generate_orderedappend_paths, we only handle IS_SIMPLE_REL type\n> relations.\n>\n> I've attached a patch that updates the comments to mention this.\n\nLooks good to me. Thanks.\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 1 Oct 2021 09:37:27 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" }, { "msg_contents": "On Fri, 1 Oct 2021 at 13:37, Amit Langote <amitlangote09@gmail.com> wrote:\n> > I've attached a patch that updates the comments to mention this.\n>\n> Looks good to me. Thanks.\n\nThanks. Pushed.\n\nDavid\n\n\n", "msg_date": "Fri, 1 Oct 2021 15:10:58 +1300", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Record a Bitmapset of non-pruned partitions" } ]
[ { "msg_contents": "Hi.\n\nI've seen the following effect on PostgreSQL 14 stable branch.\nIndex, created on partitioned table, disappears from pg_dump or psql \\d \noutput.\nThis seems to begin after analyze. Partitoned relation relhasindex \npg_class field suddenly becomes false.\n\nThe issue happens after\n\ncommit 0e69f705cc1a3df273b38c9883fb5765991e04fe (HEAD, refs/bisect/bad)\nAuthor: Alvaro Herrera <alvherre@alvh.no-ip.org>\nDate: Fri Apr 9 11:29:08 2021 -0400\n\n Set pg_class.reltuples for partitioned tables\n\n When commit 0827e8af70f4 added auto-analyze support for partitioned\n tables, it included code to obtain reltuples for the partitioned \ntable\n as a number of catalog accesses to read pg_class.reltuples for each\n partition. That's not only very inefficient, but also problematic\n because autovacuum doesn't hold any locks on any of those tables -- \nand\n doesn't want to. Replace that code with a read of \npg_class.reltuples\n for the partitioned table, and make sure ANALYZE and TRUNCATE \nproperly\n maintain that value.\n\n I found no code that would be affected by the change of relpages \nfrom\n zero to non-zero for partitioned tables, and no other code that \nshould\n be maintaining it, but if there is, hopefully it'll be an easy fix.\n\n Per buildfarm.\n\n Author: Álvaro Herrera <alvherre@alvh.no-ip.org>\n Reviewed-by: Zhihong Yu <zyu@yugabyte.com>\n\nIt seems that in this commit we unconditionally overwrite this data with \n0.\nI've tried to fix it by getting this information when inh is true and \nignoring nindexes when inh is not true.\n\n-- \nBest regards,\nAlexander Pyhalov,\nPostgres Professional", "msg_date": "Wed, 30 Jun 2021 17:26:08 +0300", "msg_from": "Alexander Pyhalov <a.pyhalov@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Partitioned index can be not dumped" }, { "msg_contents": "Alexander Pyhalov писал 2021-06-30 17:26:\n> Hi.\n> \n> \n\nSorry, test had an issue.\n\n-- \nBest regards,\nAlexander Pyhalov,\nPostgres 
Professional", "msg_date": "Wed, 30 Jun 2021 17:33:50 +0300", "msg_from": "Alexander Pyhalov <a.pyhalov@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Partitioned index can be not dumped" }, { "msg_contents": "On 2021-Jun-30, Alexander Pyhalov wrote:\n\n> Hi.\n> \n> I've seen the following effect on PostgreSQL 14 stable branch.\n> Index, created on partitioned table, disappears from pg_dump or psql \\d\n> output.\n> This seems to begin after analyze. Partitoned relation relhasindex pg_class\n> field suddenly becomes false.\n\nUh, ouch.\n\nI'll look into this shortly.\n\n\n\n-- \n�lvaro Herrera Valdivia, Chile\n https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 30 Jun 2021 10:44:13 -0400", "msg_from": "=?utf-8?Q?=C3=81lvaro?= Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Partitioned index can be not dumped" }, { "msg_contents": "On 2021-Jun-30, Alexander Pyhalov wrote:\n\n> I've seen the following effect on PostgreSQL 14 stable branch.\n> Index, created on partitioned table, disappears from pg_dump or psql \\d\n> output.\n> This seems to begin after analyze. Partitoned relation relhasindex pg_class\n> field suddenly becomes false.\n\nYeah, that seems correct.\n\nI didn't verify your test case, but after looking at the code I thought\nthere was a bit too much churn and the new conditions looked quite messy\nand unexplained. It seems simpler to be explicit at the start about\nwhat we're doing, and keep nindexes=0 for partitioned tables; with that,\nthe code works unchanged because the \"for\" loops do nothing without\nhaving to check for anything. My proposal is attached.\n\nI did run the tests and they do pass, but I didn't look very closely at\nwhat the tests are actually doing.\n\nI noticed that part of that comment seems to be a leftover from ... 
I\ndon't know when: \"We do not analyze index columns if there was an\nexplicit column list in the ANALYZE command, however.\" I suppose this\nis about some code that was removed, but I didn't dig into it.\n\n-- \n�lvaro Herrera Valdivia, Chile\n\"How strange it is to find the words \"Perl\" and \"saner\" in such close\nproximity, with no apparent sense of irony. I doubt that Larry himself\ncould have managed it.\" (ncm, http://lwn.net/Articles/174769/)", "msg_date": "Wed, 30 Jun 2021 14:54:09 -0400", "msg_from": "=?utf-8?Q?=C3=81lvaro?= Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Partitioned index can be not dumped" }, { "msg_contents": "On Wed, Jun 30, 2021 at 11:54 AM Álvaro Herrera <alvherre@alvh.no-ip.org>\nwrote:\n\n> On 2021-Jun-30, Alexander Pyhalov wrote:\n>\n> > I've seen the following effect on PostgreSQL 14 stable branch.\n> > Index, created on partitioned table, disappears from pg_dump or psql \\d\n> > output.\n> > This seems to begin after analyze. Partitoned relation relhasindex\n> pg_class\n> > field suddenly becomes false.\n>\n> Yeah, that seems correct.\n>\n> I didn't verify your test case, but after looking at the code I thought\n> there was a bit too much churn and the new conditions looked quite messy\n> and unexplained. It seems simpler to be explicit at the start about\n> what we're doing, and keep nindexes=0 for partitioned tables; with that,\n> the code works unchanged because the \"for\" loops do nothing without\n> having to check for anything. My proposal is attached.\n>\n> I did run the tests and they do pass, but I didn't look very closely at\n> what the tests are actually doing.\n>\n> I noticed that part of that comment seems to be a leftover from ... 
I\n> don't know when: \"We do not analyze index columns if there was an\n> explicit column list in the ANALYZE command, however.\" I suppose this\n> is about some code that was removed, but I didn't dig into it.\n>\n> --\n> Álvaro Herrera Valdivia, Chile\n> \"How strange it is to find the words \"Perl\" and \"saner\" in such close\n> proximity, with no apparent sense of irony. I doubt that Larry himself\n> could have managed it.\" (ncm, http://lwn.net/Articles/174769/)\n\nHi,\nnit:\n- if (hasindex)\n+ if (nindexes > 0)\n\nIt seems hasindex is no longer needed since nindexes is checked.\n\nCheers\n\nOn Wed, Jun 30, 2021 at 11:54 AM Álvaro Herrera <alvherre@alvh.no-ip.org> wrote:On 2021-Jun-30, Alexander Pyhalov wrote:\n\n> I've seen the following effect on PostgreSQL 14 stable branch.\n> Index, created on partitioned table, disappears from pg_dump or psql \\d\n> output.\n> This seems to begin after analyze. Partitoned relation relhasindex pg_class\n> field suddenly becomes false.\n\nYeah, that seems correct.\n\nI didn't verify your test case, but after looking at the code I thought\nthere was a bit too much churn and the new conditions looked quite messy\nand unexplained.  It seems simpler to be explicit at the start about\nwhat we're doing, and keep nindexes=0 for partitioned tables; with that,\nthe code works unchanged because the \"for\" loops do nothing without\nhaving to check for anything.  My proposal is attached.\n\nI did run the tests and they do pass, but I didn't look very closely at\nwhat the tests are actually doing.\n\nI noticed that part of that comment seems to be a leftover from ... 
I\ndon't know when: \"We do not analyze index columns if there was an\nexplicit column list in the ANALYZE command, however.\"  I suppose this\nis about some code that was removed, but I didn't dig into it.\n\n-- \nÁlvaro Herrera                        Valdivia, Chile\n\"How strange it is to find the words \"Perl\" and \"saner\" in such close\nproximity, with no apparent sense of irony. I doubt that Larry himself\ncould have managed it.\"         (ncm, http://lwn.net/Articles/174769/)Hi,nit:-       if (hasindex)+       if (nindexes > 0) It seems hasindex is no longer needed since nindexes is checked.Cheers", "msg_date": "Wed, 30 Jun 2021 12:26:20 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: Partitioned index can be not dumped" }, { "msg_contents": "Álvaro Herrera писал 2021-06-30 21:54:\n> On 2021-Jun-30, Alexander Pyhalov wrote:\n> \n>> I've seen the following effect on PostgreSQL 14 stable branch.\n>> Index, created on partitioned table, disappears from pg_dump or psql \n>> \\d\n>> output.\n>> This seems to begin after analyze. Partitoned relation relhasindex \n>> pg_class\n>> field suddenly becomes false.\n> \n> Yeah, that seems correct.\n> \n> I didn't verify your test case, but after looking at the code I thought\n> there was a bit too much churn and the new conditions looked quite \n> messy\n> and unexplained. It seems simpler to be explicit at the start about\n> what we're doing, and keep nindexes=0 for partitioned tables; with \n> that,\n> the code works unchanged because the \"for\" loops do nothing without\n> having to check for anything. My proposal is attached.\n> \n> I did run the tests and they do pass, but I didn't look very closely at\n> what the tests are actually doing.\n> \n> I noticed that part of that comment seems to be a leftover from ... 
I\n> don't know when: \"We do not analyze index columns if there was an\n> explicit column list in the ANALYZE command, however.\" I suppose this\n> is about some code that was removed, but I didn't dig into it.\n\nLooks good. It seems this comment refers to line 455.\n\n 445 if (nindexes > 0)\n 446 {\n 447 indexdata = (AnlIndexData *) palloc0(nindexes * \nsizeof(AnlIndexData));\n 448 for (ind = 0; ind < nindexes; ind++)\n 449 {\n 450 AnlIndexData *thisdata = &indexdata[ind];\n 451 IndexInfo *indexInfo;\n 452\n 453 thisdata->indexInfo = indexInfo = \nBuildIndexInfo(Irel[ind]);\n 454 thisdata->tupleFract = 1.0; /* fix later if partial */\n 455 if (indexInfo->ii_Expressions != NIL && va_cols == NIL)\n 456 {\n 457 ListCell *indexpr_item = \nlist_head(indexInfo->ii_Expressions);\n 458\n 459 thisdata->vacattrstats = (VacAttrStats **)\n 460 palloc(indexInfo->ii_NumIndexAttrs * \nsizeof(VacAttrStats *));\n\nAlso I've added non-necessary new line in test.\nRestored comment and removed new line.\n-- \nBest regards,\nAlexander Pyhalov,\nPostgres Professional", "msg_date": "Wed, 30 Jun 2021 22:28:42 +0300", "msg_from": "Alexander Pyhalov <a.pyhalov@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Partitioned index can be not dumped" }, { "msg_contents": "On 2021-Jun-30, Zhihong Yu wrote:\n\n> Hi,\n> nit:\n> - if (hasindex)\n> + if (nindexes > 0)\n> \n> It seems hasindex is no longer needed since nindexes is checked.\n\nIt's still used to call vac_update_relstats(). 
We want nindexes to be 0\nfor partitioned tables, but still pass true when there are indexes.\n\nPlease don't forget to trim the text of the email you're replying to.\n\n-- \n�lvaro Herrera Valdivia, Chile\n https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 30 Jun 2021 17:32:40 -0400", "msg_from": "=?utf-8?Q?=C3=81lvaro?= Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Partitioned index can be not dumped" }, { "msg_contents": "On Wed, Jun 30, 2021 at 2:32 PM Álvaro Herrera <alvherre@alvh.no-ip.org>\nwrote:\n\n> On 2021-Jun-30, Zhihong Yu wrote:\n>\n> > Hi,\n> > nit:\n> > - if (hasindex)\n> > + if (nindexes > 0)\n> >\n> > It seems hasindex is no longer needed since nindexes is checked.\n>\n> It's still used to call vac_update_relstats(). We want nindexes to be 0\n> for partitioned tables, but still pass true when there are indexes.\n>\nHi,\nIn that case, I wonder whether nindexes can be negated following the call\nto vac_open_indexes().\n\n vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);\n+ nindexes = -nindexes;\n\nThat way, hasindex can be dropped.\nvac_update_relstats() call would become:\n\n vac_update_relstats(onerel, -1, totalrows,\n- 0, false, InvalidTransactionId,\n+ 0, nindexes != 0, InvalidTransactionId,\n\nMy thinking is that without hasindex, the code is easier to maintain.\n\nThanks\n\n\n> Please don't forget to trim the text of the email you're replying to.\n>\n> --\n> Álvaro Herrera Valdivia, Chile\n> https://www.EnterpriseDB.com/\n>\n\nOn Wed, Jun 30, 2021 at 2:32 PM Álvaro Herrera <alvherre@alvh.no-ip.org> wrote:On 2021-Jun-30, Zhihong Yu wrote:\n\n> Hi,\n> nit:\n> -       if (hasindex)\n> +       if (nindexes > 0)\n> \n> It seems hasindex is no longer needed since nindexes is checked.\n\nIt's still used to call vac_update_relstats().  
We want nindexes to be 0\nfor partitioned tables, but still pass true when there are indexes.Hi,In that case, I wonder whether nindexes can be negated following the call to vac_open_indexes().        vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);+       nindexes = -nindexes;That way, hasindex can be dropped.vac_update_relstats() call would become:        vac_update_relstats(onerel, -1, totalrows,-                           0, false, InvalidTransactionId,+                           0, nindexes != 0, InvalidTransactionId,My thinking is that without hasindex, the code is easier to maintain.Thanks\n\nPlease don't forget to trim the text of the email you're replying to.\n\n-- \nÁlvaro Herrera                        Valdivia, Chile\n                        https://www.EnterpriseDB.com/", "msg_date": "Wed, 30 Jun 2021 14:56:17 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: Partitioned index can be not dumped" }, { "msg_contents": "On 2021-Jun-30, Zhihong Yu wrote:\n\n> Hi,\n> In that case, I wonder whether nindexes can be negated following the call\n> to vac_open_indexes().\n> \n> vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);\n> + nindexes = -nindexes;\n> \n> That way, hasindex can be dropped.\n> vac_update_relstats() call would become:\n> \n> vac_update_relstats(onerel, -1, totalrows,\n> - 0, false, InvalidTransactionId,\n> + 0, nindexes != 0, InvalidTransactionId,\n\nPerhaps this works, but I don't think it's a readability improvement.\n\n> My thinking is that without hasindex, the code is easier to maintain.\n\nYou have one less variable but one additional concept (negative\nnindexes). 
It doesn't seem easier to me, TBH, rather the opposite.\n\n-- \n�lvaro Herrera 39�49'30\"S 73�17'W\n https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 30 Jun 2021 17:57:36 -0400", "msg_from": "=?utf-8?Q?=C3=81lvaro?= Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Partitioned index can be not dumped" }, { "msg_contents": "On 2021-Jun-30, Alexander Pyhalov wrote:\n\n> Looks good. It seems this comment refers to line 455.\n\nAh, so it does.\n\nI realized that we don't need to do vac_open_indexes for partitioned\ntables: it is sufficient to know whether there are any indexes at all.\nSo I replaced that with RelationGetIndexList() and checking if the list\nis nonempty, specifically for the partitioned table case. Pushed now.\n\nThanks for reporting and fixing this, and to Zhihong Yu for reviewing.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"Investigación es lo que hago cuando no sé lo que estoy haciendo\"\n(Wernher von Braun)\n\n\n", "msg_date": "Thu, 1 Jul 2021 13:01:19 -0400", "msg_from": "=?utf-8?Q?=C3=81lvaro?= Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Partitioned index can be not dumped" } ]
[ { "msg_contents": "When PostgresNode::system_or_bail() fails, it's quite opaque as to what\nis happening. This patch improves things by printing some detail, as\nsuggested in Perl's doc for system().\n\n-- \n�lvaro Herrera 39�49'30\"S 73�17'W\n https://www.EnterpriseDB.com/", "msg_date": "Wed, 30 Jun 2021 11:24:33 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": true, "msg_subject": "trivial improvement to system_or_bail" }, { "msg_contents": "> On 30 Jun 2021, at 17:24, Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> \n> When PostgresNode::system_or_bail() fails, it's quite opaque as to what\n> is happening. This patch improves things by printing some detail, as\n> suggested in Perl's doc for system().\n\n+1 on this from reading the patch.\n\n+\t\t\tBAIL_OUT(\"system $_[0] failed: $!\\n\");\nI wonder if we should take more inspiration from the Perl manual and change it\nto \"failed to execute\" to make it clear that the failure was in executing the\nprogram, not from the program itself?\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Wed, 30 Jun 2021 17:36:34 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: trivial improvement to system_or_bail" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> When PostgresNode::system_or_bail() fails, it's quite opaque as to what\n> is happening. This patch improves things by printing some detail, as\n> suggested in Perl's doc for system().\n\n+1 for adding the extra details, but another thing that I've always found\nvery confusing is just the phrasing of the message itself. It makes\nno sense unless (a) you know that \"system\" is Perl's function for\nexecuting a shell command, (b) you are familiar with Perl's generally\ncavalier approach to parentheses, and (c) you are also unbothered by\nwhether the word \"failed\" is part of the message text or the command\nbeing complained of. 
We really need to do something to set off the\nshell command's text from the surrounding verbiage a little better.\n\nI'd prefer something like\n\n\tcommand \"pg_ctl start\" failed: details here\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 11:45:27 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: trivial improvement to system_or_bail" }, { "msg_contents": "On 2021-Jun-30, Daniel Gustafsson wrote:\n\n> +\t\t\tBAIL_OUT(\"system $_[0] failed: $!\\n\");\n> I wonder if we should take more inspiration from the Perl manual and change it\n> to \"failed to execute\" to make it clear that the failure was in executing the\n> program, not from the program itself?\n\nYou're right, that's a good distinction to make. I've used this\nwording. Thanks.\n\n> +1 for adding the extra details, but another thing that I've always found\n> very confusing is just the phrasing of the message itself. It makes\n> no sense unless (a) you know that \"system\" is Perl's function for\n> executing a shell command, (b) you are familiar with Perl's generally\n> cavalier approach to parentheses, and (c) you are also unbothered by\n> whether the word \"failed\" is part of the message text or the command\n> being complained of. We really need to do something to set off the\n> shell command's text from the surrounding verbiage a little better.\n> \n> I'd prefer something like\n> \n> \tcommand \"pg_ctl start\" failed: details here\n\nDone that way, thanks for the suggestion.\n\nFailures now look like this, respectively:\n\nBailout called. Further testing stopped: failed to execute command \"finitdb -D /home/alvherre/Code/pgsql-build/master/src/test/recovery/tmp_check/t_019_replslot_limit_primary_data/pgdata -A trust -N --wal-segsize=1\": No such file or directory\n\nBailout called. 
Further testing stopped: command \"initdb -0D /home/alvherre/Code/pgsql-build/master/src/test/recovery/tmp_check/t_019_replslot_limit_primary_data/pgdata -A trust -N --wal-segsize=1\" exited with value 1\n\nBailout called. Further testing stopped: command \"initdb -0D /home/alvherre/Code/pgsql-build/master/src/test/recovery/tmp_check/t_019_replslot_limit_primary_data/pgdata -A trust -N --wal-segsize=1\" died with signal 11\n\n\nPreviously it was just\n\nBailout called. Further testing stopped: system initdb failed\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"I must say, I am absolutely impressed with what pgsql's implementation of\nVALUES allows me to do. It's kind of ridiculous how much \"work\" goes away in\nmy code. Too bad I can't do this at work (Oracle 8/9).\" (Tom Allison)\n http://archives.postgresql.org/pgsql-general/2007-06/msg00016.php\n\n\n", "msg_date": "Tue, 6 Jul 2021 17:59:11 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": true, "msg_subject": "Re: trivial improvement to system_or_bail" }, { "msg_contents": "> On 6 Jul 2021, at 23:59, Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n\n> Failures now look like this, respectively:\n> \n> Bailout called. Further testing stopped: failed to execute command \"finitdb -D /home/alvherre/Code/pgsql-build/master/src/test/recovery/tmp_check/t_019_replslot_limit_primary_data/pgdata -A trust -N --wal-segsize=1\": No such file or directory\n> \n> Bailout called. Further testing stopped: command \"initdb -0D /home/alvherre/Code/pgsql-build/master/src/test/recovery/tmp_check/t_019_replslot_limit_primary_data/pgdata -A trust -N --wal-segsize=1\" exited with value 1\n> \n> Bailout called. Further testing stopped: command \"initdb -0D /home/alvherre/Code/pgsql-build/master/src/test/recovery/tmp_check/t_019_replslot_limit_primary_data/pgdata -A trust -N --wal-segsize=1\" died with signal 11\n> \n> \n> Previously it was just\n> \n> Bailout called. 
Further testing stopped: system initdb failed\n\nThat is no doubt going to be helpful, thanks!\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Wed, 7 Jul 2021 09:13:58 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: trivial improvement to system_or_bail" } ]
[ { "msg_contents": "-hackers,\n\nThis patch adds the concept of \"multiconnect\" to pgbench (better\nterminology welcome). The basic idea here is to allow connections made\nwith pgbench to use different auth values or connect to multiple\ndatabases. We implement this using a user-provided PGSERVICEFILE and\nchoosing a PGSERVICE from this based on a number of strategies.\n(Currently the only supported strategies are round robin or random.)\n\nThere is definite room for improvement here; at the very least, teaching\n`pgbench -i` about all of the distinct DBs referenced in this service\nfile would ensure that initialization works as expected in all places.\nFor now, we are punting initialization to the user in this version of\nthe patch if using more that one database in the given service file.\n\nBest,\n\nDavid", "msg_date": "Wed, 30 Jun 2021 11:53:03 -0500", "msg_from": "David Christensen <david.christensen@crunchydata.com>", "msg_from_op": true, "msg_subject": "[PATCH] pgbench: add multiconnect option" }, { "msg_contents": "Hello David,\n\n> This patch adds the concept of \"multiconnect\" to pgbench (better\n> terminology welcome).\n\nGood. I was thinking of adding such capability, possibly for handling \nconnection errors and reconnecting…\n\n> The basic idea here is to allow connections made with pgbench to use \n> different auth values or connect to multiple databases. We implement \n> this using a user-provided PGSERVICEFILE and choosing a PGSERVICE from \n> this based on a number of strategies. 
(Currently the only supported \n> strategies are round robin or random.)\n\nI was thinking of providing a allowing a list of conninfo strings with \nrepeated options, eg --conninfo \"foo\" --conninfo \"bla\"…\n\nYour approach using PGSERVICEFILE also make sense!\n\nMaybe it could be simplified, the code base reduced, and provide more \nbenefits, by mixing both ideas.\n\nIn particular, pgbench parses the file but then it will be read also by \nlibpq, yuk yuk.\n\nAlso, I do not like that PGSERVICE is overriden by pgbench, while other \noptions are passed with the parameters approach in doConnect. It would \nmake proce sense to add a \"service\" field to the parameters for \nconsistency, if this approach was to be pursued.\n\nOn reflexion, I'd suggest to use the --conninfo (or some other name) \napproach, eg \"pgbench --conninfo='service=s1' --conninfo='service=s2'\" and \nusers just have to set PGSERVICEFILE env themselves, which I think is \nbetter than pgbench overriding env variables behind their back.\n\nThis allow to have a service file with more connections and just tell \npgbench which ones to use, which is the expected way to use this feature. \nThis drops file parsing.\n\nI can only see benefit to this simplified approach.\nWhat do you think?\n\nAbout the patch:\n\nThere are warnings about trailing whitespaces when applying the patch, and \nthere are some tabbing issues in the file.\n\nI would not consume \"-g\" option unless there is some logical link with the \nfeature. I'd be okay with \"-m\" if it is still needed. I would suggest to \nuse it for the choice strategy?\n\nstringinfo: We already have PQExpBuffer imported, could we use that \ninstead? Having two set of struct/functions which do the same in the same \nsource file does not look like a good idea. 
If we do not parse the file, \nnothing is needed, which is a relief.\n\nAttached is my work-in-progress start at adding conninfo, that would need \nto be improved with strategies.\n\n-- \nFabien.", "msg_date": "Thu, 1 Jul 2021 12:22:45 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "On Thu, Jul 01, 2021 at 12:22:45PM +0200, Fabien COELHO wrote:\n> Good. I was thinking of adding such capability, possibly for handling\n> connection errors and reconnecting…\n\nround-robin and random make sense. I am wondering how round-robin\nwould work with -C, though? Would you just reuse the same connection\nstring as the one chosen at the starting point.\n\n> I was thinking of providing a allowing a list of conninfo strings with\n> repeated options, eg --conninfo \"foo\" --conninfo \"bla\"…\n\nThat was my first thought when reading the subject of this thread:\ncreate a list of connection strings and pass one of them to\ndoConnect() to grab the properties looked for. That's a bit confusing\nthough as pgbench does not support directly connection strings, and we\nshould be careful to keep fallback_application_name intact.\n\n> Your approach using PGSERVICEFILE also make sense!\n\nI am not sure that's actually needed here, as it is possible to pass\ndown a service name within a connection string. I think that you'd\nbetter leave libpq do all the work related to a service file, if\nspecified. pgbench does not need to know any of that.\n--\nMichael", "msg_date": "Fri, 27 Aug 2021 15:32:13 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "Bonjour Michaël,\n\n>> Good. I was thinking of adding such capability, possibly for handling\n>> connection errors and reconnecting…\n>\n> round-robin and random make sense. 
I am wondering how round-robin\n> would work with -C, though? Would you just reuse the same connection\n> string as the one chosen at the starting point.\n\nWell, not necessarily, but this is debatable.\n\n>> I was thinking of providing a allowing a list of conninfo strings with\n>> repeated options, eg --conninfo \"foo\" --conninfo \"bla\"…\n>\n> That was my first thought when reading the subject of this thread:\n> create a list of connection strings and pass one of them to\n> doConnect() to grab the properties looked for. That's a bit confusing\n> though as pgbench does not support directly connection strings,\n\nThey are supported because libpq silently assumes that \"dbname\" can be a \nfull connection string.\n\n> and we should be careful to keep fallback_application_name intact.\n\nHmmm. See attached patch, ISTM that it does the right thing.\n\n>> Your approach using PGSERVICEFILE also make sense!\n>\n> I am not sure that's actually needed here, as it is possible to pass\n> down a service name within a connection string. I think that you'd\n> better leave libpq do all the work related to a service file, if\n> specified. pgbench does not need to know any of that.\n\nYes, this is an inconvenient with this approach, part of libpq machinery\nis more or less replicated in pgbench, which is quite annoying, and less \npowerful.\n\nAttached my work-in-progress version, with a few open issues (eg probably \nnot thread safe), but comments about the provided feature are welcome.\n\nI borrowed the \"strategy\" option, renamed policy, from the initial patch. \nPgbench just accepts several connection strings as parameters, eg:\n\n pgbench ... \"service=db1\" \"service=db2\" \"service=db3\"\n\nThe next stage is to map scripts to connections types and connections\nto connection types, so that pgbench could run W transactions against a \nprimary and R transactions agains a hot standby, for instance. 
I have a \nsome design for that, but nothing is implemented.\n\nThere is also the combination with the error handling patch to consider: \nif a connection fails, a connection to a replica could be issued instead.\n\n-- \nFabien.", "msg_date": "Fri, 27 Aug 2021 18:40:22 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "> >> Good. I was thinking of adding such capability, possibly for handling\n> >> connection errors and reconnecting…\n> >\n> > round-robin and random make sense. I am wondering how round-robin\n> > would work with -C, though? Would you just reuse the same connection\n> > string as the one chosen at the starting point.\n>\n> Well, not necessarily, but this is debatable.\n\nMy expectation for such a behavior would be that it would reconnect to\na random connstring each time, otherwise what's the point of using\nthis with -C? If we needed to forbid some option combinations that is\nalso an option.\n\n> >> I was thinking of providing a allowing a list of conninfo strings with\n> >> repeated options, eg --conninfo \"foo\" --conninfo \"bla\"…\n> >\n> > That was my first thought when reading the subject of this thread:\n> > create a list of connection strings and pass one of them to\n> > doConnect() to grab the properties looked for. That's a bit confusing\n> > though as pgbench does not support directly connection strings,\n>\n> They are supported because libpq silently assumes that \"dbname\" can be a\n> full connection string.\n>\n> > and we should be careful to keep fallback_application_name intact.\n>\n> Hmmm. See attached patch, ISTM that it does the right thing.\n\nI guess the multiple --conninfo approach is fine; I personally liked\nhaving the list come from a file, as you could benchmark different\ngroups/clusters based on a file, much easier than constructing\nmultiple pgbench invocations depending. 
I can see an argument for\nboth approaches. The PGSERVICEFILE was an idea I'd had to store\neasily indexed groups of connection information in a way that I didn't\nneed to know all the details, could easily parse, and could later pass\nin the ENV so libpq could just pull out the information.\n\n> >> Your approach using PGSERVICEFILE also make sense!\n> >\n> > I am not sure that's actually needed here, as it is possible to pass\n> > down a service name within a connection string. I think that you'd\n> > better leave libpq do all the work related to a service file, if\n> > specified. pgbench does not need to know any of that.\n>\n> Yes, this is an inconvenient with this approach, part of libpq machinery\n> is more or less replicated in pgbench, which is quite annoying, and less\n> powerful.\n\nThere is some small fraction reproduced here just to pull out the\nnamed sections; no other parsing should be done though.\n\n> Attached my work-in-progress version, with a few open issues (eg probably\n> not thread safe), but comments about the provided feature are welcome.\n>\n> I borrowed the \"strategy\" option, renamed policy, from the initial patch.\n> Pgbench just accepts several connection strings as parameters, eg:\n>\n> pgbench ... \"service=db1\" \"service=db2\" \"service=db3\"\n>\n> The next stage is to map scripts to connections types and connections\n> to connection types, so that pgbench could run W transactions against a\n> primary and R transactions agains a hot standby, for instance. I have a\n> some design for that, but nothing is implemented.\n>\n> There is also the combination with the error handling patch to consider:\n> if a connection fails, a connection to a replica could be issued instead.\n\nI'll see if I can take a look at your latest patch. 
I was also\nwondering about how we should handle `pgbench -i` with multiple\nconnection strings; currently it would only initialize with the first\nDSN it gets, but it probably makes sense to run initialize against all\nof the databases (or at least attempt to). Maybe this is one argument\nfor the multiple --conninfo handling, since you could explicitly pass\nthe databases you want. (Not that it is hard to just loop over\nconnection info and `pgbench -i` with ENV, or any other number of ways\nto accomplish the same thing.)\n\nBest,\n\nDavid\n\n\n", "msg_date": "Fri, 27 Aug 2021 12:29:24 -0500", "msg_from": "David Christensen <david.christensen@crunchydata.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "Hello David,\n\n>>> round-robin and random make sense. I am wondering how round-robin\n>>> would work with -C, though? Would you just reuse the same connection\n>>> string as the one chosen at the starting point.\n>>\n>> Well, not necessarily, but this is debatable.\n>\n> My expectation for such a behavior would be that it would reconnect to\n> a random connstring each time, otherwise what's the point of using\n> this with -C? If we needed to forbid some option combinations that is\n> also an option.\n\nYep. ISTM that it should follow the connection policy/strategy, what ever \nit is.\n\n>>>> I was thinking of providing a allowing a list of conninfo strings with\n>>>> repeated options, eg --conninfo \"foo\" --conninfo \"bla\"…\n>>>\n>>> That was my first thought when reading the subject of this thread:\n>>> create a list of connection strings and pass one of them to\n>>> doConnect() to grab the properties looked for. That's a bit confusing\n>>> though as pgbench does not support directly connection strings,\n>>\n>> They are supported because libpq silently assumes that \"dbname\" can be a\n>> full connection string.\n>>\n>>> and we should be careful to keep fallback_application_name intact.\n>>\n>> Hmmm. 
See attached patch, ISTM that it does the right thing.\n>\n> I guess the multiple --conninfo approach is fine; I personally liked\n> having the list come from a file, as you could benchmark different\n> groups/clusters based on a file, much easier than constructing\n> multiple pgbench invocations depending. I can see an argument for\n> both approaches. The PGSERVICEFILE was an idea I'd had to store\n> easily indexed groups of connection information in a way that I didn't\n> need to know all the details, could easily parse, and could later pass\n> in the ENV so libpq could just pull out the information.\n\nThe attached version does work with the service file if the user provides \n\"service=whatever\" on the command line. The main difference is that it \nsticks to the libpq policy to use an explicit connection string or list of \nconnection strings.\n\nAlso, note that the patch I sent dropped the --conninfo option. \nConnections are simply tghe last arguments to pgbench.\n\n> I'll see if I can take a look at your latest patch.\n\nThanks!\n\n> I was also wondering about how we should handle `pgbench -i` with \n> multiple connection strings; currently it would only initialize with the \n> first DSN it gets, but it probably makes sense to run initialize against \n> all of the databases (or at least attempt to).\n\nI'll tend to disagree on this one. Pgbench whole expectation is to run \nagainst \"one\" system, which might be composed of several nodes because of \nreplications. I do not think that it is desirable to jump to \"serveral \nfully independent databases\".\n\n> Maybe this is one argument for the multiple --conninfo handling, since \n> you could explicitly pass the databases you want. 
(Not that it is hard \n> to just loop over connection info and `pgbench -i` with ENV, or any \n> other number of ways to accomplish the same thing.)\n\nYep.\n\n-- \nFabien.", "msg_date": "Sat, 28 Aug 2021 11:01:50 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "Hi guys,\n\nIt looks like David sent a patch and Fabien sent a followup patch. But\nthere hasn't been a whole lot of discussion or further patches.\n\nIt sounds like there are some basic questions about what the right\ninterface should be. Are there specific questions that would be\nhelpful for moving forward?\n\n\n", "msg_date": "Tue, 15 Mar 2022 16:29:33 -0400", "msg_from": "Greg Stark <stark@mit.edu>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "\nHello Greg,\n\n> It looks like David sent a patch and Fabien sent a followup patch. But\n> there hasn't been a whole lot of discussion or further patches.\n>\n> It sounds like there are some basic questions about what the right\n> interface should be. Are there specific questions that would be\n> helpful for moving forward?\n\nReview the designs and patches and tell us what you think?\n\nPersonnaly, I think that allowing multiple connections is a good thing, \nespecially if the code impact is reduced, which is the case with the \nversion I sent.\n\nThen for me the next step would be to have a reconnection on errors so as \nto implement a client-side failover policy that could help testing a \nserver-failover performance impact. 
I have done that internally but it \nrequires that \"Pgbench Serialization and deadlock errors\" to land, as it \nwould just be another error that can be handled.\n\n-- \nFabien.\n\n\n", "msg_date": "Wed, 16 Mar 2022 10:28:57 +0100 (CET)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "The current version of the patch does not apply, so I could not test it.\r\n\r\nHere are some comments I have.\r\n\r\nPgbench is a simple benchmark tool by design, and I wonder if adding \r\na multiconnect feature will cause pgbench to be used incorrectly.\r\nA real world use-case will be helpful for this thread.\r\n\r\nFor the current patch, Should the report also cover per-database statistics (tps/latency/etc.) ?\r\n\r\nRegards,\r\n\r\nSami Imseih\r\nAmazon Web Services\r\n\r\n\r\n", "msg_date": "Fri, 18 Mar 2022 17:32:45 +0000", "msg_from": "\"Imseih (AWS), Sami\" <simseih@amazon.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "Hi Sami,\n\n> Pgbench is a simple benchmark tool by design, and I wonder if adding\n> a multiconnect feature will cause pgbench to be used incorrectly.\n\nMaybe, but I do not see how it would be worse that what pgbench already \nallows.\n\n> A real world use-case will be helpful for this thread.\n\nBasically more versatile testing for non single host setups.\n\nFor instance, it would allow testing directly a multi-master setup, such \nas bucardo, symmetricds or coackroachdb.\n\nIt would be a first step on the path to allow interesting features such \nas:\n\n - testing failover setup, on connection error a client could connect to \nanother host.\n\n - testing a primary/standby setup, with write transactions sent to the \nprimary and read transactions sent to the standbyes.\n\nBasically I have no doubt that it can be useful.\n\n> For the current patch, Should the report also cover 
per-database \n> statistics (tps/latency/etc.) ?\n\nThat could be a \"per-connection\" option. If there is a reasonable use case \nI think that it would be an easy enough feature to implement.\n\nAttached a rebased version.\n\n-- \nFabien.", "msg_date": "Sat, 19 Mar 2022 17:43:52 +0100 (CET)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "On Sat, Mar 19, 2022 at 11:43 AM Fabien COELHO <coelho@cri.ensmp.fr> wrote:\n\n>\n> Hi Sami,\n>\n> > Pgbench is a simple benchmark tool by design, and I wonder if adding\n> > a multiconnect feature will cause pgbench to be used incorrectly.\n>\n> Maybe, but I do not see how it would be worse that what pgbench already\n> allows.\n>\n\nI agree that pgbench is simple; perhaps really too simple when it comes to\nbeing able to measure much more than basic query flows. What pgbench does\nhave in its favor is being distributed with the core distribution.\n\nI think there is definitely space for a more complicated benchmarking tool\nthat exercises more scenarios and more realistic query patterns and\nscenarios. Whether that is distributed with the core is another question.\n\nDavid\n\nOn Sat, Mar 19, 2022 at 11:43 AM Fabien COELHO <coelho@cri.ensmp.fr> wrote:\nHi Sami,\n\n> Pgbench is a simple benchmark tool by design, and I wonder if adding\n> a multiconnect feature will cause pgbench to be used incorrectly.\n\nMaybe, but I do not see how it would be worse that what pgbench already \nallows.I agree that pgbench is simple; perhaps really too simple when it comes to being able to measure much more than basic query flows.  What pgbench does have in its favor is being distributed with the core distribution.I think there is definitely space for a more complicated benchmarking tool that exercises more scenarios and more realistic query patterns and scenarios.  
Whether that is distributed with the core is another question.David", "msg_date": "Tue, 22 Mar 2022 10:40:09 -0500", "msg_from": "David Christensen <david.christensen@crunchydata.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "\n>>> Pgbench is a simple benchmark tool by design, and I wonder if adding\n>>> a multiconnect feature will cause pgbench to be used incorrectly.\n>>\n>> Maybe, but I do not see how it would be worse that what pgbench already\n>> allows.\n>>\n>\n> I agree that pgbench is simple; perhaps really too simple when it comes to\n> being able to measure much more than basic query flows. What pgbench does\n> have in its favor is being distributed with the core distribution.\n>\n> I think there is definitely space for a more complicated benchmarking tool\n> that exercises more scenarios and more realistic query patterns and\n> scenarios. Whether that is distributed with the core is another question.\n\nAs far as this feature is concerned, the source code impact of the patch \nis very small, so I do not think that is worth barring this feature on \nthat ground.\n\n-- \nFabien.\n\n\n", "msg_date": "Fri, 25 Mar 2022 10:50:09 +0100 (CET)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "According to the cfbot this patch needs a rebase\n\n\n", "msg_date": "Thu, 31 Mar 2022 15:00:58 -0400", "msg_from": "Greg Stark <stark@mit.edu>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "> According to the cfbot this patch needs a rebase\n\nIndeed. 
v4 attached.\n\n-- \nFabien.", "msg_date": "Sat, 2 Apr 2022 15:34:51 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "2022年4月2日(土) 22:35 Fabien COELHO <coelho@cri.ensmp.fr>:\n>\n>\n> > According to the cfbot this patch needs a rebase\n>\n> Indeed. v4 attached.\n\nHi\n\ncfbot reports the patch no longer applies. As CommitFest 2022-11 is\ncurrently underway, this would be an excellent time to update the patch.\n\nThanks\n\nIan Barwick\n\n\n", "msg_date": "Fri, 4 Nov 2022 13:39:48 +0900", "msg_from": "Ian Lawrence Barwick <barwick@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "Hello Ian,\n\n> cfbot reports the patch no longer applies. As CommitFest 2022-11 is\n> currently underway, this would be an excellent time to update the patch.\n\nAttached a v5 which is just a rebase.\n\n-- \nFabien.", "msg_date": "Mon, 7 Nov 2022 21:46:22 +0100 (CET)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "This patch seems to have quite some use case overlap with my patch which\nadds load balancing to libpq itself:\nhttps://www.postgresql.org/message-id/flat/PR3PR83MB04768E2FF04818EEB2179949F7A69@PR3PR83MB0476.EURPRD83.prod.outlook.com\n\nMy patch is only able to add \"random\" load balancing though, not\n\"round-robin\". So this patch still definitely seems useful, even when mine\ngets merged.\n\nI'm not sure that the support for the \"working\" connection is necessary\nfrom a feature perspective though (usability/discoverability is another\nquestion). It's already possible to achieve the same behaviour by simply\nproviding multiple host names in the connection string. 
You can even tell\nlibpq to connect to a primary or secondary by using the\ntarget_session_attrs option.\n\nOn Fri, 6 Jan 2023 at 11:33, Fabien COELHO <coelho@cri.ensmp.fr> wrote:\n\n>\n> Hello Ian,\n>\n> > cfbot reports the patch no longer applies. As CommitFest 2022-11 is\n> > currently underway, this would be an excellent time to update the patch.\n>\n> Attached a v5 which is just a rebase.\n>\n> --\n> Fabien.\n\nThis patch seems to have quite some use case overlap with my patch which adds load balancing to libpq itself: https://www.postgresql.org/message-id/flat/PR3PR83MB04768E2FF04818EEB2179949F7A69@PR3PR83MB0476.EURPRD83.prod.outlook.comMy patch is only able to add \"random\" load balancing though, not \"round-robin\". So this patch still definitely seems useful, even when mine gets merged. I'm not sure that the support for the \"working\" connection is necessary from a feature perspective though (usability/discoverability is another question). It's already possible to achieve the same behaviour by simply providing multiple host names in the connection string. You can even tell libpq to connect to a primary or secondary by using the target_session_attrs option.On Fri, 6 Jan 2023 at 11:33, Fabien COELHO <coelho@cri.ensmp.fr> wrote:\nHello Ian,\n\n> cfbot reports the patch no longer applies.  
As CommitFest 2022-11 is\n> currently underway, this would be an excellent time to update the patch.\n\nAttached a v5 which is just a rebase.\n\n-- \nFabien.", "msg_date": "Fri, 6 Jan 2023 11:44:52 +0100", "msg_from": "Jelte Fennema <postgres@jeltef.nl>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "\nHello Jelte,\n\n> This patch seems to have quite some use case overlap with my patch which\n> adds load balancing to libpq itself:\n> https://www.postgresql.org/message-id/flat/PR3PR83MB04768E2FF04818EEB2179949F7A69@PR3PR83MB0476.EURPRD83.prod.outlook.com\n\nThanks for the pointer.\n\nThe end purpose of the patch is to allow pgbench to follow a failover at \nsome point, at the client level, AFAICR.\n\n> My patch is only able to add \"random\" load balancing though, not\n> \"round-robin\". So this patch still definitely seems useful, even when mine\n> gets merged.\n\nYep. I'm not sure the end purpose is the same, but possibly the pgbench \npatch could take advantage of libpq extension.\n\n> I'm not sure that the support for the \"working\" connection is necessary\n> from a feature perspective though (usability/discoverability is another\n> question). It's already possible to achieve the same behaviour by simply\n> providing multiple host names in the connection string. You can even tell\n> libpq to connect to a primary or secondary by using the\n> target_session_attrs option.\n\n-- \nFabien.\n\n\n", "msg_date": "Tue, 10 Jan 2023 16:59:34 +0100 (CET)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "On Tue, 8 Nov 2022 at 02:16, Fabien COELHO <coelho@cri.ensmp.fr> wrote:\n>\n>\n> Hello Ian,\n>\n> > cfbot reports the patch no longer applies. 
As CommitFest 2022-11 is\n> > currently underway, this would be an excellent time to update the patch.\n>\n> Attached a v5 which is just a rebase.\n\nThe patch does not apply on top of HEAD as in [1], please post a rebased patch:\n=== Applying patches on top of PostgreSQL commit ID\n3c6fc58209f24b959ee18f5d19ef96403d08f15c ===\n=== applying patch ./pgbench-multi-connect-conninfo-5.patch\n(Stripping trailing CRs from patch; use --binary to disable.)\npatching file doc/src/sgml/ref/pgbench.sgml\nHunk #3 FAILED at 921.\n1 out of 3 hunks FAILED -- saving rejects to file\ndoc/src/sgml/ref/pgbench.sgml.rej\n\n[1] - http://cfbot.cputube.org/patch_41_3227.log\n\nRegards,\nVignesh\n\n\n", "msg_date": "Wed, 11 Jan 2023 22:17:51 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" }, { "msg_contents": "On Wed, 11 Jan 2023 at 22:17, vignesh C <vignesh21@gmail.com> wrote:\n>\n> On Tue, 8 Nov 2022 at 02:16, Fabien COELHO <coelho@cri.ensmp.fr> wrote:\n> >\n> >\n> > Hello Ian,\n> >\n> > > cfbot reports the patch no longer applies. As CommitFest 2022-11 is\n> > > currently underway, this would be an excellent time to update the patch.\n> >\n> > Attached a v5 which is just a rebase.\n>\n> The patch does not apply on top of HEAD as in [1], please post a rebased patch:\n> === Applying patches on top of PostgreSQL commit ID\n> 3c6fc58209f24b959ee18f5d19ef96403d08f15c ===\n> === applying patch ./pgbench-multi-connect-conninfo-5.patch\n> (Stripping trailing CRs from patch; use --binary to disable.)\n> patching file doc/src/sgml/ref/pgbench.sgml\n> Hunk #3 FAILED at 921.\n> 1 out of 3 hunks FAILED -- saving rejects to file\n> doc/src/sgml/ref/pgbench.sgml.rej\n\nThere has been no updates on this thread for some time, so this has\nbeen switched as Returned with Feedback. 
Feel free to change it open\nin the next commitfest if you plan to continue on this.\n\nRegards,\nVignesh\n\n\n", "msg_date": "Tue, 31 Jan 2023 23:00:21 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] pgbench: add multiconnect option" } ]
[ { "msg_contents": "Hello,\n\nHere is a set of patches to add SQL:2011 application-time support (aka\nvalid-time).\nPrevious discussion was on\nhttps://www.postgresql.org/message-id/20200930073908.GQ1996@paquier.xyz\nbut I thought I should update the email subject.\n\nThere are four patches here:\n\n- Add PERIODs.\n- Add temporal PRIMARY KEY and UNIQUE constraints.\n- Add UPDATE/DELETE FOR PORTION OF.\n- Add temporal FOREIGN KEYs.\n\nThe PERIOD patch is mostly Vik Fearing's work (submitted here a few\nyears ago), so he should get credit for that!\n\nAll patches have tests & documentation. I do have a few more tests I\nplan to write, and there are some questions for reviewers embedded in\npatches (mostly about when to lock and/or copy data structures). I've\ntried to format these as C++ comments to indicate they should be\nremoved before committing.\n\nThroughout I've made sure that wherever SQL:2011 accepts a PERIOD, we\nalso accept a range column. So in all these examples valid_at could be\neither one:\n\n PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)\n FOREIGN KEY (id, PERIOD valid_at)\n REFERENCES too (id, PERIOD valid_at)\n FOR PORTION OF valid_at FROM t1 TO t2\n\nRange types are superior to PERIODs in many ways, so I think we should\nsupport both. For example you can SELECT them, WHERE them, GROUP BY\nthem, pass them to functions, return them from functions, do\narithmetic on them, index them, etc.\n\nIn fact whether you use a PERIOD or a range, the implementation uses\nranges a lot, since they are such a good fit. A temporal PK is really\nan exclusion constraint, etc. When you define a PERIOD, we find a\nmatching range type and store its oid on the period record. If there\nare more than one range type we raise an error, but you can give a\nrangetype option to remove the ambiguity. This means we support\nPERIODs of any type (basically), not just dates & timestamps.\n\nAccording to SQL:2011 we should automatically set any columns used by\na PERIOD to NOT NULL. 
I've ignored that requirement, since permitting\nnullable columns is strictly greater functionality: you can always\nmake the columns NOT NULL if you like. Interpreting NULLs as unbounded\nfits better with our range types, and it means you don't have to use\nsentinels. (Timestamp has +-Infinity, but many types don't.) Oracle\nalso accepts null PERIOD columns and treats them the same way. I don't\nthink it would break anything though to force PERIOD columns to NOT\nNULL. If you hate sentinels you can just use range columns. But still\nI see no reason to force this on our users.\n\nIn the FOR PORTION OF bounds I accept MINVALUE and MAXVALUE as special\ntokens. I chose the names to be consistent with partition syntax. This\nisn't part of the standard but seems nice.\n\nHere are a few other things to discuss:\n\n- My patch only adds application time. There is a separate patch to\nadd system time: https://commitfest.postgresql.org/33/2316/ I don't\nforesee any serious conflicts between our work, and in general I think\neach patch implements its functionality at an appropriate (but\ndifferent) level of abstraction. But I haven't looked at that patch\nrecently. I'll try to give some comments during this commitfest. The\none place they probably overlap is with defining PERIODs. Since\nsystem-time periods *must* be named SYSTEM_TIME, even that overlap\nshould be slight, but it still might be worth accepting the PERIOD\npatch here before adopting either. Even SYSTEM_TIME ought to be\nrecorded in information_schema.periods IIRC.\n\n- The biggest thing remaining to do is to add support for partitioned\ntables. I would love some help with that if anyone is interested.\n\n- Since temporal PKs are implemented with exclusion constraints they\nuse GiST indexes, so you can't really use them without the btree_gist\nextension (unless *all* your key parts are ranges---which is how we\ntest exclusion constraints). 
Personally I'm okay with this, since even\nexclusion constraints are pretty useless without that extension. But\nit seems like something to talk about.\n\n- At PgCon 2020 Vik suggested a different way of querying for FK\nchecks, which he used in his own temporal tables extension. It is more\ncomplicated but he thinks it may be faster. I plan to try both and run\nsome benchmarks. I'm not sure whether his approach will work with\nCASCADE/SET NULL/SET DEFAULT---but I haven't looked at it in a while.\n\n- It is hard to avoid a shift/reduce conflict in FOR PORTION OF\n<period_or_range> FROM <expr> TO <expr> because expressions may\ncontain date INTERVALs that also may contain TO. So this is an error:\n\n FOR PORTION OF valid_at\n FROM '2018-03-01' AT TIME ZONE INTERVAL '2' HOUR\n TO '2019-01-01'\n\nbut this works:\n\n FOR PORTION OF valid_at\n FROM ('2018-03-01' AT TIME ZONE INTERVAL '2' HOUR)\n TO '2019-01-01'\n\nI'm personally satisfied with that, but if anyone thinks it can be\nimproved please let me know. It would be nice if the parser were smart\nenough to see that without a second TO, it must belong to FOR PORTION\nOF, not the interval. But *I'm* not smart enough to teach it that. :-)\nIf only it could have a greater lookahead. . . .\n\n- Normally we return the number of rows affected by an UPDATE/DELETE.\nWhat do you think we should do when a FOR PORTION OF causes extra rows\nto be inserted? I'm not doing anything special here today. After all\nforeign keys don't do anything extra when they CASCADE/SET (to my\nknowledge). Also I think adding info about the inserted rows might be\nannoying, since I'd have to communicate it from within the trigger\nfunction. I'm really hoping no one asks for this.\n\n- Since PERIODs are a weird neither-fish-nor-foul thing (parsed a lot\nlike a column, but also behaving like a constraint), they add a lot of\ntedious if-statements when they are used by an index or constraint. 
In\nmany places I've used a zero attnum to signal that a component is\nreally a PERIOD. (Range columns are easy since they really are a\ncolumn.) I feel this approach is pretty ugly, so I will probably\nexperiment a bit with a different way. If anyone else wants to take\nthis on though, I'm grateful for the help.\n\n- It would be really cool if ON CONFLICT DO UPDATE had a temporal\nvariant so it would INSERT the missing durations and UPDATE the\nexisting ones. That's what Tom Johnston said the standard should have\nrequired in *Bitemporal Data*, and it does make things a lot easier on\nthe client side. But that is something to do in a later patch. . . .\n\nYours,\nPaul", "msg_date": "Wed, 30 Jun 2021 10:39:00 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "SQL:2011 application time" }, { "msg_contents": "On Wed, Jun 30, 2021 at 10:39 AM Paul A Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n> Here is a set of patches to add SQL:2011 application-time support (aka\n> valid-time).\n\nHere is a small fix to prevent `FOR PORTION OF valid_at FROM MAXVALUE\nTO foo` or `FROM foo TO MINVALUE`. I rebased on latest master too.\n\nYours,\nPaul", "msg_date": "Fri, 2 Jul 2021 14:39:50 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Jul 2, 2021 at 2:39 PM Paul A Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On Wed, Jun 30, 2021 at 10:39 AM Paul A Jungwirth\n> <pj@illuminatedcomputing.com> wrote:\n> > Here is a set of patches to add SQL:2011 application-time support (aka\n> > valid-time).\n>\n> Here is a small fix to prevent `FOR PORTION OF valid_at FROM MAXVALUE\n> TO foo` or `FROM foo TO MINVALUE`. I rebased on latest master too.\n\nHere is a patch set that cleans up the catalog docs for pg_period. 
The\ncolumns have changed since that was written, and also we use a\ndifferent sgml structure on those pages now. Note pg_period still\ncontains a couple essentially-unused columns, perislocal and\nperinhcount. Those are intended for supporting table inheritance, so\nI've left them in.\n\nPaul", "msg_date": "Sat, 3 Jul 2021 10:46:55 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sat, Jul 03, 2021 at 10:46:55AM -0700, Paul A Jungwirth wrote:\n> On Fri, Jul 2, 2021 at 2:39 PM Paul A Jungwirth\n> <pj@illuminatedcomputing.com> wrote:\n> >\n> > On Wed, Jun 30, 2021 at 10:39 AM Paul A Jungwirth\n> > <pj@illuminatedcomputing.com> wrote:\n> > > Here is a set of patches to add SQL:2011 application-time support (aka\n> > > valid-time).\n> >\n> > Here is a small fix to prevent `FOR PORTION OF valid_at FROM MAXVALUE\n> > TO foo` or `FROM foo TO MINVALUE`. I rebased on latest master too.\n> \n> Here is a patch set that cleans up the catalog docs for pg_period. The\n> columns have changed since that was written, and also we use a\n> different sgml structure on those pages now. Note pg_period still\n> contains a couple essentially-unused columns, perislocal and\n> perinhcount. Those are intended for supporting table inheritance, so\n> I've left them in.\n> \n\nHi Paul,\n\nThanks for working on this. 
It would be a great improvement.\n\nI wanted to test the patches but:\n\npatch 01: does apply but doesn't compile, attached the compile errors.\npatch 04: does not apply clean.\n\nPlease fix and resend.\n\n-- \nJaime Casanova\nDirector de Servicios Profesionales\nSystemGuards - Consultores de PostgreSQL", "msg_date": "Sat, 4 Sep 2021 14:55:56 -0500", "msg_from": "Jaime Casanova <jcasanov@systemguards.com.ec>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sat, Sep 4, 2021 at 12:56 PM Jaime Casanova\n<jcasanov@systemguards.com.ec> wrote:\n>\n> patch 01: does apply but doesn't compile, attached the compile errors.\n> patch 04: does not apply clean.\n\nThanks for taking a look! I've rebased & made it compile again. v7 attached.\n\nYours,\nPaul", "msg_date": "Mon, 6 Sep 2021 12:52:37 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Mon, Sep 6, 2021 at 12:53 PM Paul A Jungwirth <\npj@illuminatedcomputing.com> wrote:\n\n> On Sat, Sep 4, 2021 at 12:56 PM Jaime Casanova\n> <jcasanov@systemguards.com.ec> wrote:\n> >\n> > patch 01: does apply but doesn't compile, attached the compile errors.\n> > patch 04: does not apply clean.\n>\n> Thanks for taking a look! I've rebased & made it compile again. v7\n> attached.\n>\n> Yours,\n> Paul\n>\nHi,\nFor v7-0001-Add-PERIODs.patch :\n\n+ * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group\n\nIt seems the year (2018) should be updated to 2021.\n\nFor RemovePeriodById(), it seems table_open() can be called\nafter SearchSysCache1(). This way, if HeapTupleIsValid(tup) is true,\ntable_open() can be skipped.\n\nFor tablecmds.c, AT_PASS_ADD_PERIOD is defined as 5 with AT_PASS_ADD_CONSTR\netc moved upward. 
Do we need to consider compatibility ?\n\nThere are a few TODO's such as:\n+ * TODO: What about periods?\n\nAre they going to be addressed in the next round of patches ?\n\nThere seems to be some overlap between ATExecAddPeriod()\nand AddRelationNewPeriod().\nIs it possible to reduce code duplication ?\n\nCheers\n\nOn Mon, Sep 6, 2021 at 12:53 PM Paul A Jungwirth <pj@illuminatedcomputing.com> wrote:On Sat, Sep 4, 2021 at 12:56 PM Jaime Casanova\n<jcasanov@systemguards.com.ec> wrote:\n>\n> patch 01: does apply but doesn't compile, attached the compile errors.\n> patch 04: does not apply clean.\n\nThanks for taking a look! I've rebased & made it compile again. v7 attached.\n\nYours,\nPaulHi,For v7-0001-Add-PERIODs.patch :+ * Portions Copyright (c) 1996-2018, PostgreSQL Global Development GroupIt seems the year (2018) should be updated to 2021.For RemovePeriodById(), it seems table_open() can be called after SearchSysCache1(). This way, if HeapTupleIsValid(tup) is true, table_open() can be skipped.For tablecmds.c, AT_PASS_ADD_PERIOD is defined as 5 with AT_PASS_ADD_CONSTR etc moved upward. Do we need to consider compatibility ?There are a few TODO's such as:+    * TODO: What about periods?Are they going to be addressed in the next round of patches ?There seems to be some overlap between ATExecAddPeriod() and AddRelationNewPeriod().Is it possible to reduce code duplication ?Cheers", "msg_date": "Mon, 6 Sep 2021 13:40:13 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Mon, Sep 06, 2021 at 12:52:37PM -0700, Paul A Jungwirth wrote:\n> On Sat, Sep 4, 2021 at 12:56 PM Jaime Casanova\n> <jcasanov@systemguards.com.ec> wrote:\n> >\n> > patch 01: does apply but doesn't compile, attached the compile errors.\n> > patch 04: does not apply clean.\n> \n> Thanks for taking a look! I've rebased & made it compile again. 
v7 attached.\n> \n\npatch 01: does apply but gives a compile warning (which is fixed by patch\n02)\n\"\"\"\nparse_utilcmd.c: In function ‘generateClonedIndexStmt’:\nparse_utilcmd.c:1730:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]\n Period *p = makeNode(Period);\n ^~~~~~\n\"\"\"\n\npatch 03: produces these compile errors. \n\nanalyze.c: In function ‘transformForPortionOfBound’:\nanalyze.c:1171:3: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]\n A_Const *n2 = makeNode(A_Const);\n ^~~~~~~\nanalyze.c:1172:10: error: ‘union ValUnion’ has no member named ‘type’\n n2->val.type = T_Null;\n ^\nanalyze.c:1172:18: error: ‘T_Null’ undeclared (first use in this function)\n n2->val.type = T_Null;\n ^~~~~~\nanalyze.c:1172:18: note: each undeclared identifier is reported only once for each function it appears in\n\n\n\n-- \nJaime Casanova\nDirector de Servicios Profesionales\nSystemGuards - Consultores de PostgreSQL\n\n\n", "msg_date": "Fri, 10 Sep 2021 20:50:17 -0500", "msg_from": "Jaime Casanova <jcasanov@systemguards.com.ec>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Sep 10, 2021 at 6:50 PM Jaime Casanova\n<jcasanov@systemguards.com.ec> wrote:\n>\n> patch 01: does apply but gives a compile warning (which is fixed by patch\n> 02)\n> [snip]\n> patch 03: produces these compile errors.\n\nI did a rebase and fixed this new error, as well as the warnings.\n\nOn Mon, Sep 6, 2021 at 1:40 PM Zhihong Yu <zyu@yugabyte.com> wrote:\n>\n> + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group\n>\n> It seems the year (2018) should be updated to 2021.\n\nDone.\n\n> For RemovePeriodById(), it seems table_open() can be called after SearchSysCache1(). 
This way, if HeapTupleIsValid(tup) is true, table_open() can be skipped.\n\nThis seems like it permits a race condition when two connections both\ntry to drop the period, right?\n\n> For tablecmds.c, AT_PASS_ADD_PERIOD is defined as 5 with AT_PASS_ADD_CONSTR etc moved upward. Do we need to consider compatibility ?\n\nI don't think there is a compatibility problem---can you explain?\nThese symbols aren't used outside tablecmds.c and the values aren't\nsaved anywhere AFAIK.\n\n> There are a few TODO's such as:\n> Are they going to be addressed in the next round of patches ?\n\nThese are mostly questions I'm hoping a reviewer can help me answer,\nbut I'll take a pass through them and see which I can remove myself.\nSeveral are for adding support for partitioned tables, where I would\ndefinitely appreciate help.\n\n> There seems to be some overlap between ATExecAddPeriod() and AddRelationNewPeriod().\n> Is it possible to reduce code duplication ?\n\nI've refactored those functions to remove some duplication, but I\nthink I prefer the old version---let me know if you have suggestions\nto avoid the duplication in a nicer way.\n\nOh also I realized fp_triggers.c wasn't included in the last few patch\nfiles---I'm sorry about that!\n\nLatest files attached. Thanks for the reviews!\n\nPaul", "msg_date": "Sun, 12 Sep 2021 21:12:19 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "So I've been eagerly watching this thread and hoping to have time to devote\nto it. 
I've also been looking at the thread at\nhttps://www.postgresql.org/message-id/CALAY4q8Pp699qv-pJZc4toS-e2NzRJKrvaX-xqG1aqj2Q+Ww-w@mail.gmail.com\nthat covers system versioning, and per our conversation far too long ago\n(again, my bad) it's obvious that the two efforts shouldn't do anything\nthat would be in conflict with one another, as we eventually have to\nsupport bitemporal [1] tables: tables that have both system versioning and\nan application period.\n\nBelow is a list of observations and questions about this proposed patch of\nitself in isolation, but mostly about how it relates to the work being done\nfor system versioning.\n\n1. This patch creates a pg_period catalog table, whereas the system\nversioning relies on additions to pg_attribute to identify the start/end\ncolumns. Initially I thought this was because it was somehow possible to\nhave *multiple* application periods defined on a table, but in reading [1]\nI see that there are some design suppositions that would make a second\napplication period impossible[2]. I can also see where having this table\nwould facilitate the easy creation of INFORMATION_SCHEMA.PERIODS. I was\npreviously unaware that this info schema table was a thing, but I have\nfound references to it, though I'm unclear as to whether it's supposed to\nhave information about system versioned tables in it as well.\n\nQ 1.1. Would a bitemporal table have two entries in that view?\nQ 1.2. Could you see being able to implement this without pg_period, using\nonly additions to pg_attribute (start/end for system temporal, start/end\nfor application, plus an addition for period name)?\nQ 1.3. Can you see a way to represent the system versioning in pg_period\nsuch that bitemporal tables were possible?\n\n 2. The system versioning effort has chosen 'infinity' as their end-time\nvalue, whereas you have chosen NULL as that makes sense for an unbounded\nrange. 
Other databases seem to leverage '9999-12-31 23:59:59' (SQLServer,\nIIRC) whereas some others seem to used '2999-12-31 23:59:59' but those\nmight have been home-rolled temporal implementations. To further add to the\nconfusion, the syntax seems to specify the keyword of MAXVALUE, which\nfurther muddies things. The system versioning people went with 'infinity'\nseemingly because it prescribe and end to the world like SQLServer did, but\nalso because it allowed for a primary key based on (id, endtime) and that's\njust not possible with NULL endtime values.\n\nQ 2.1. Do you have any thoughts about how to resolve this notational logjam?\n\n3. I noticed some inconsistency in the results from various \"SELECT * FROM\nportion_of_test\" examples. In some, the \"valid_at\" range is shown but not\ncolumns that make it up, and in some others, the \"valid_from\" and\n\"valid_to\" columns are shown, with no mention of the period. From what I've\nseen, the period column should be invisible unless invoked, like ctid or\nxmin.\n\n4. The syntax '2018-03-04' AT TIME ZONE INTERVAL '2' HOUR TO MINUTE simply\nconfounded me. I googled around for it, but could find no matches for\npostgres exception in mailing list discussions circa 2003. I tried it out\nmyself and, lo and behold\n\n# SELECT '2018-03-04' AT TIME ZONE INTERVAL '2' HOUR TO MINUTE;\n timezone\n---------------------\n 2018-03-04 05:02:00\n(1 row)\n\n\nI really didn't expect that to work, or even \"work\". I can see that it\nadded 2 minutes to UTC's perspective on my local concept of midnight, but I\ndon't understand what it's supposed to mean.\n\nQ 4.1. What does it mean?\n\n5. I haven't seen any actual syntax conflicts between this patch and the\nsystem versioning patch. Both teams added basically the same keywords,\nthough I haven't dove more deeply into any bison incompatibilities. Still,\nit's a great start.\n\n6. 
Overall, I'm really excited about what this will mean for data\ngovernance in postgres.\n\n[1]\nhttps://cs.ulb.ac.be/public/_media/teaching/infoh415/tempfeaturessql2011.pdf\n[2] In the bitemporal table example in [1] - the application period get the\ndefined primary key, and the system_time period would be merely unique\n\nOn Mon, Sep 13, 2021 at 12:12 AM Paul A Jungwirth <\npj@illuminatedcomputing.com> wrote:\n\n> On Fri, Sep 10, 2021 at 6:50 PM Jaime Casanova\n> <jcasanov@systemguards.com.ec> wrote:\n> >\n> > patch 01: does apply but gives a compile warning (which is fixed by patch\n> > 02)\n> > [snip]\n> > patch 03: produces these compile errors.\n>\n> I did a rebase and fixed this new error, as well as the warnings.\n>\n> On Mon, Sep 6, 2021 at 1:40 PM Zhihong Yu <zyu@yugabyte.com> wrote:\n> >\n> > + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group\n> >\n> > It seems the year (2018) should be updated to 2021.\n>\n> Done.\n>\n> > For RemovePeriodById(), it seems table_open() can be called after\n> SearchSysCache1(). This way, if HeapTupleIsValid(tup) is true, table_open()\n> can be skipped.\n>\n> This seems like it permits a race condition when two connections both\n> try to drop the period, right?\n>\n> > For tablecmds.c, AT_PASS_ADD_PERIOD is defined as 5 with\n> AT_PASS_ADD_CONSTR etc moved upward. 
Do we need to consider compatibility ?\n>\n> I don't think there is a compatibility problem---can you explain?\n> These symbols aren't used outside tablecmds.c and the values aren't\n> saved anywhere AFAIK.\n>\n> > There are a few TODO's such as:\n> > Are they going to be addressed in the next round of patches ?\n>\n> These are mostly questions I'm hoping a reviewer can help me answer,\n> but I'll take a pass through them and see which I can remove myself.\n> Several are for adding support for partitioned tables, where I would\n> definitely appreciate help.\n>\n> > There seems to be some overlap between ATExecAddPeriod() and\n> AddRelationNewPeriod().\n> > Is it possible to reduce code duplication ?\n>\n> I've refactored those functions to remove some duplication, but I\n> think I prefer the old version---let me know if you have suggestions\n> to avoid the duplication in a nicer way.\n>\n> Oh also I realized fp_triggers.c wasn't included in the last few patch\n> files---I'm sorry about that!\n>\n> Latest files attached. Thanks for the reviews!\n>\n> Paul\n>\n\nSo I've been eagerly watching this thread and hoping to have time to devote to it. I've also been looking at the thread at https://www.postgresql.org/message-id/CALAY4q8Pp699qv-pJZc4toS-e2NzRJKrvaX-xqG1aqj2Q+Ww-w@mail.gmail.com that covers system versioning, and per our conversation far too long ago (again, my bad) it's obvious that the two efforts shouldn't do anything that would be in conflict with one another, as we eventually have to support bitemporal [1] tables: tables that have both system versioning and an application period.Below is a list of observations and questions about this proposed patch of itself in isolation, but mostly about how it relates to the work being done for system versioning.1. This patch creates a pg_period catalog table, whereas the system versioning relies on additions to pg_attribute to identify the start/end columns. 
Initially I thought this was because it was somehow possible to have multiple application periods defined on a table, but in reading [1] I see that there are some design suppositions that would make a second application period impossible[2]. I can also see where having this table would facilitate the easy creation of INFORMATION_SCHEMA.PERIODS. I was previously unaware that this info schema table was a thing, but I have found references to it, though I'm unclear as to whether it's supposed to have information about system versioned tables in it as well. Q 1.1. Would a bitemporal table have two entries in that view?Q 1.2. Could you see being able to implement this without pg_period, using only additions to pg_attribute (start/end for system temporal, start/end for application, plus an addition for period name)?Q 1.3. Can you see a way to represent the system versioning in pg_period such that bitemporal tables were possible? 2. The system versioning effort has chosen 'infinity' as their end-time value, whereas you have chosen NULL as that makes sense for an unbounded range. Other databases seem to leverage '9999-12-31 23:59:59' (SQLServer, IIRC) whereas some others seem to used '2999-12-31 23:59:59' but those might have been home-rolled temporal implementations. To further add to the confusion, the syntax seems to specify the keyword of MAXVALUE, which further muddies things. The system versioning people went with 'infinity' seemingly because it prescribe and end to the world like SQLServer did, but also because it allowed for a primary key based on (id, endtime) and that's just not possible with NULL endtime values.Q 2.1. Do you have any thoughts about how to resolve this notational logjam?3. I noticed some inconsistency in the results from various \"SELECT * FROM portion_of_test\" examples. 
In some, the \"valid_at\" range is shown but not columns that make it up, and in some others, the \"valid_from\" and \"valid_to\" columns are shown, with no mention of the period. From what I've seen, the period column should be invisible unless invoked, like ctid or xmin.4. The syntax '2018-03-04' AT TIME ZONE INTERVAL '2'  HOUR TO MINUTE simply confounded me. I googled around for it, but could find no matches for postgres exception in mailing list discussions circa 2003. I tried it out myself and, lo and behold# SELECT '2018-03-04' AT TIME ZONE INTERVAL '2'  HOUR TO MINUTE;      timezone      --------------------- 2018-03-04 05:02:00(1 row)I really didn't expect that to work, or even \"work\". I can see that it added 2 minutes to UTC's perspective on my local concept of midnight, but I don't understand what it's supposed to mean.Q 4.1. What does it mean?5. I haven't seen any actual syntax conflicts between this patch and the system versioning patch. Both teams added basically the same keywords, though I haven't dove more deeply into any bison incompatibilities. Still, it's a great start.6. 
Overall, I'm really excited about what this will mean for data governance in postgres.[1] https://cs.ulb.ac.be/public/_media/teaching/infoh415/tempfeaturessql2011.pdf[2] In the bitemporal table example in [1] - the application period get the defined primary key, and the system_time period would be merely uniqueOn Mon, Sep 13, 2021 at 12:12 AM Paul A Jungwirth <pj@illuminatedcomputing.com> wrote:On Fri, Sep 10, 2021 at 6:50 PM Jaime Casanova\n<jcasanov@systemguards.com.ec> wrote:\n>\n> patch 01: does apply but gives a compile warning (which is fixed by patch\n> 02)\n> [snip]\n> patch 03: produces these compile errors.\n\nI did a rebase and fixed this new error, as well as the warnings.\n\nOn Mon, Sep 6, 2021 at 1:40 PM Zhihong Yu <zyu@yugabyte.com> wrote:\n>\n> + * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group\n>\n> It seems the year (2018) should be updated to 2021.\n\nDone.\n\n> For RemovePeriodById(), it seems table_open() can be called after SearchSysCache1(). This way, if HeapTupleIsValid(tup) is true, table_open() can be skipped.\n\nThis seems like it permits a race condition when two connections both\ntry to drop the period, right?\n\n> For tablecmds.c, AT_PASS_ADD_PERIOD is defined as 5 with AT_PASS_ADD_CONSTR etc moved upward. 
Do we need to consider compatibility ?\n\nI don't think there is a compatibility problem---can you explain?\nThese symbols aren't used outside tablecmds.c and the values aren't\nsaved anywhere AFAIK.\n\n> There are a few TODO's such as:\n> Are they going to be addressed in the next round of patches ?\n\nThese are mostly questions I'm hoping a reviewer can help me answer,\nbut I'll take a pass through them and see which I can remove myself.\nSeveral are for adding support for partitioned tables, where I would\ndefinitely appreciate help.\n\n> There seems to be some overlap between ATExecAddPeriod() and AddRelationNewPeriod().\n> Is it possible to reduce code duplication ?\n\nI've refactored those functions to remove some duplication, but I\nthink I prefer the old version---let me know if you have suggestions\nto avoid the duplication in a nicer way.\n\nOh also I realized fp_triggers.c wasn't included in the last few patch\nfiles---I'm sorry about that!\n\nLatest files attached. Thanks for the reviews!\n\nPaul", "msg_date": "Mon, 13 Sep 2021 02:05:41 -0400", "msg_from": "Corey Huinker <corey.huinker@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hi Corey,\n\nThanks for all the good questions!\n\n> 1. This patch creates a pg_period catalog table, whereas the system versioning relies on additions to pg_attribute to identify the start/end columns. Initially I thought this was because it was somehow possible to have multiple application periods defined on a table, but in reading [1] I see that there are some design suppositions that would make a second application period impossible[2]. I can also see where having this table would facilitate the easy creation of INFORMATION_SCHEMA.PERIODS. 
I was previously unaware that this info schema table was a thing, but I have found references to it, though I'm unclear as to whether it's supposed to have information about system versioned tables in it as well.\n\nYes, information_schema.periods is given by the standard. Having\npg_period seems like a natural place to store periods, since they are\nseparate entities. I think that is a better design than just storing\nthem as extra fields in pg_attribute. It follows normal normalization\nrules.\n\nThe standard forbids multiple application-time periods per table. From\nSQL:2011 in the SQL/Foundation section\n(7IWD2-02-Foundation-2011-12.pdf available from\nhttp://www.wiscorp.com/sql20nn.zip) under 11.27 <add table period\ndefinition>:\n\n> 5) If <table period definition> contains <application time period specification> ATPS, then:\n> b) The table descriptor of T shall not include a period descriptor other than a system-time period descriptor.\n\nIn other words you can add both a SYSTEM TIME period and one other\napplication-time period (whose name is your choice), but if you\nalready have an application-time period, you can't add another one.\n\nI also checked other RDBMSes and none of them allow it either:\n\nIn Mariadb 10.6.4 (the latest) I get \"ERROR 4154 (HY000); Cannot\nspecify more than one application-time period\".\n\nOracle disallows it with a vague error:\n\n SQL> create table t2 (id int, valid_from date, valid_til date,\nperiod for valid_at (valid_from, valid_til), period for valid_at2\nvalid_from, valid_til));\n create table t2 (id int, valid_from date, valid_til date, period\nfor valid_at (valid_from, valid_til), period for valid_at2\n(valid_from, valid_til))\n\n *\n ERROR at line 1:\n ORA-55603: invalid flashback archive or valid time period command\n\n(Using different start/end columns for each period doesn't change the result.)\n\nIn IBM DB2 you can only have one because application-time periods must\nbe named \"business_time\" (not 
joking).\n\nMssql (2019) doesn't support application periods.\n\nPersonally I feel like it's a weird limitation and I wouldn't mind\nsupporting more, but my current implementation only allows for one,\nand I'd have to rethink some things to do it differently.\n\nAlso: I think information_schema.periods *should* include SYSTEM_TIME\nperiods. The spec says (in SQL/Schemata, file\n7IWD2-11-Schemata-2011-12.pdf at the link above), \"The PERIODS base\ntable has one row for each period defined for a table. It effectively\ncontains a representation of the period descriptors.\" It doesn't say\nanything about excluding system-time periods.\n\nI checked mariadb, mssql, oracle, and db2, and I could only find this\ntable in db2, as syscat.periods. It includes both application-time and\nsystem-time periods.\n\nThe spec calls for the columns table_catalog, table_schema,\ntable_name, period_name, start_column_name, and end_column_name. There\nisn't a column to distinguish the period type, but since a period is a\nsystem-time period iff its name is \"SYSTEM_TIME\", technically such a\ncolumn isn't needed.\n\nThe db2 columns are periodname, tabschema, tabname, begincolname,\nendcolname, periodtype, historytabschema, and historytabname. The\nperiodtype column is either A or S (for application-time or\nsystem-time).\n\n> Q 1.1. Would a bitemporal table have two entries in that view?\n\nYes.\n\n> Q 1.2. Could you see being able to implement this without pg_period, using only additions to pg_attribute (start/end for system temporal, start/end for application, plus an addition for period name)?\n\nNot just period name, but also the range type associated with the\nperiod (which should be determined at period creation, so that you can\npass an option to disambiguate if there are two ranges defined for the\nsame base type), the constraint oid (to prevent end <= start), and\nsome more data for inherited tables (not really used yet). 
It seems\nugly to hang all these extra values on a pg_attribute record.\n\n> Q 1.3. Can you see a way to represent the system versioning in pg_period such that bitemporal tables were possible?\n\nYes. Even though the name \"SYSTEM_TIME\" is technically enough, I'd\nstill include a pertype column to make distinguishing system vs\napplication periods easier and more obvious.\n\n> 2. The system versioning effort has chosen 'infinity' as their end-time value, whereas you have chosen NULL as that makes sense for an unbounded range. Other databases seem to leverage '9999-12-31 23:59:59' (SQLServer, IIRC) whereas some others seem to used '2999-12-31 23:59:59' but those might have been home-rolled temporal implementations. To further add to the confusion, the syntax seems to specify the keyword of MAXVALUE, which further muddies things. The system versioning people went with 'infinity' seemingly because it prescribe and end to the world like SQLServer did, but also because it allowed for a primary key based on (id, endtime) and that's just not possible with NULL endtime values.\n\nI think it's a little weird that our system-time patch mutates your\nprimary key. None of the other RDMBSes do that. I don't think it's\nincompatible (as long as the system time patch knows how to preserve\nthe extra period/range data in an application-time temporal key), but\nit feels messy to me.\n\nI would prefer if system-time and application-time used the same value\nto mean \"unbounded\". Using null means we can support any type (not\njust types with +-Infinity). And it pairs nicely with range types. 
If\nthe only reason for system-time to use Infinity is the primary key, I\nthink it would be better not to mutate the primary key (and store the\nhistorical records in a separate table as other RDMSes do).\n\nBtw Oracle also uses NULL to mean \"unbounded\".\n\nWe presently forbid PKs from including expressions, but my patch lifts\nthat exception so it can index a rangetype expression built from the\nperiod start & end columns. So even if we must include the system-time\nend column in a PK, perhaps it can use a COALESCE expression to store\nInfinity even while using NULL to signify \"currently true\" from a user\nperspective.\n\n> 3. I noticed some inconsistency in the results from various \"SELECT * FROM portion_of_test\" examples. In some, the \"valid_at\" range is shown but not columns that make it up, and in some others, the \"valid_from\" and \"valid_to\" columns are shown, with no mention of the period. From what I've seen, the period column should be invisible unless invoked, like ctid or xmin.\n\nIn most cases the tests test the same functionality with both PERIODs\nand rangetype columns. For FKs they test all four combinations of\nPERIOD-referencing-PERIOD, PERIOD-referencing-range,\nrange-referencing-PERIOD, and range-referencing-range. If valid_at is\na genuine column, it is included in SELECT *, but not if it is a\nPERIOD.\n\n> 4. The syntax '2018-03-04' AT TIME ZONE INTERVAL '2' HOUR TO MINUTE simply confounded me.\n\nMe too! I have no idea what that is supposed to mean. But that\nbehavior predates my patch. I only had to deal with it because it\ncreates a shift-reduce conflict with `FOR PORTION OF valid_at FROM x\nTO y`, where x & y are expressions. I asked about this syntax at my\nPgCon 2020 talk, but I haven't ever received an answer. Perhaps\nsomeone else knows what this kind of INTERVAL means (as a modifier of\na time value).\n\n> 5. I haven't seen any actual syntax conflicts between this patch and the system versioning patch. 
Both teams added basically the same keywords, though I haven't dove more deeply into any bison incompatibilities. Still, it's a great start.\n\nI think that's right. Early on the other patch used `FOR PERIOD SYSTEM\nTIME (x, y)` instead of the standard `FOR PERIOD SYSTEM_TIME (x, y)`\nbut I believe that was fixed, so that the period name is an identifier\nand not two keywords.\n\n> 6. Overall, I'm really excited about what this will mean for data governance in postgres.\n\nMe too, and thank you for the detailed review!\n\nYours,\nPaul\n\n\n", "msg_date": "Mon, 13 Sep 2021 20:56:47 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "In IBM DB2 you can only have one because application-time periods must\n> be named \"business_time\" (not joking).\n>\n\nI saw that as well, and it made me think that someone at IBM is a fan of\nFlight Of The Conchords.\n\n\n> Personally I feel like it's a weird limitation and I wouldn't mind\n> supporting more, but my current implementation only allows for one,\n> and I'd have to rethink some things to do it differently.\n>\n\nI'm satisfied that it's not something we need to do in the first MVP.\n\n\n>\n> Yes. Even though the name \"SYSTEM_TIME\" is technically enough, I'd\n> still include a pertype column to make distinguishing system vs\n> application periods easier and more obvious.\n>\n\nSYSTEM_TIME seems to allow for DATE values in the start_time and end_time\nfields, though I cannot imagine how that would ever be practical, unless it\nwere somehow desirable to reject subsequent updates within a 24 hour\ntimeframe. I have seen instances where home-rolled application periods used\ndate values, which had similar problems where certain intermediate updates\nwould simply have to be discarded in favor of the one that was still\nstanding at midnight.\n\n\n>\n> > 2. 
The system versioning effort has chosen 'infinity' as their end-time\n> value, whereas you have chosen NULL as that makes sense for an unbounded\n> range. Other databases seem to leverage '9999-12-31 23:59:59' (SQLServer,\n> IIRC) whereas some others seem to used '2999-12-31 23:59:59' but those\n> might have been home-rolled temporal implementations. To further add to the\n> confusion, the syntax seems to specify the keyword of MAXVALUE, which\n> further muddies things. The system versioning people went with 'infinity'\n> seemingly because it prescribe and end to the world like SQLServer did, but\n> also because it allowed for a primary key based on (id, endtime) and that's\n> just not possible with NULL endtime values.\n>\n> I think it's a little weird that our system-time patch mutates your\n> primary key. None of the other RDMBSes do that. I don't think it's\n> incompatible (as long as the system time patch knows how to preserve\n> the extra period/range data in an application-time temporal key), but\n> it feels messy to me.\n>\n\nPer outline below, I'm proposing an alternate SYSTEM_TIME implementation\nthat would leave the PK as-is.\n\n\n> I would prefer if system-time and application-time used the same value\n> to mean \"unbounded\". Using null means we can support any type (not\n> just types with +-Infinity). And it pairs nicely with range types. If\n> the only reason for system-time to use Infinity is the primary key, I\n> think it would be better not to mutate the primary key (and store the\n> historical records in a separate table as other RDMSes do).\n>\n\nThe two \"big wins\" of infinity seemed (to me) to be:\n\n1. the ability to add \"AND end_time = 'infinity'\" as a cheap way to get\ncurrent rows\n2. clauses like \"WHERE CURRENT_DATE - 3 BETWEEN start_time AND end_time\"\nwould work. 
Granted, there's very specific new syntax to do that properly,\nbut you know somebody's gonna see the columns and try to do it that way.\n\n\n>\n> Btw Oracle also uses NULL to mean \"unbounded\".\n>\n\nHuh, I missed that one. That is good in that it gives some precedence to\nhow you've approached it.\n\n\n>\n> We presently forbid PKs from including expressions, but my patch lifts\n> that exception so it can index a rangetype expression built from the\n> period start & end columns. So even if we must include the system-time\n> end column in a PK, perhaps it can use a COALESCE expression to store\n> Infinity even while using NULL to signify \"currently true\" from a user\n> perspective.\n>\n\nEither way seems viable, but I understand why you want to leverage ranges\nin this way.\n\n\n>\n> > 3. I noticed some inconsistency in the results from various \"SELECT *\n> FROM portion_of_test\" examples. In some, the \"valid_at\" range is shown but\n> not columns that make it up, and in some others, the \"valid_from\" and\n> \"valid_to\" columns are shown, with no mention of the period. From what I've\n> seen, the period column should be invisible unless invoked, like ctid or\n> xmin.\n>\n> In most cases the tests test the same functionality with both PERIODs\n> and rangetype columns. For FKs they test all four combinations of\n> PERIOD-referencing-PERIOD, PERIOD-referencing-range,\n> range-referencing-PERIOD, and range-referencing-range. If valid_at is\n> a genuine column, it is included in SELECT *, but not if it is a\n> PERIOD.\n>\n\nOk, I'll have to look back over the test coverage to make sure that I\nunderstand the behavior now.\n\n\n>\n> > 4. The syntax '2018-03-04' AT TIME ZONE INTERVAL '2' HOUR TO MINUTE\n> simply confounded me.\n>\n> Me too! I have no idea what that is supposed to mean. But that\n> behavior predates my patch. 
I only had to deal with it because it\n> creates a shift-reduce conflict with `FOR PORTION OF valid_at FROM x\n> TO y`, where x & y are expressions. I asked about this syntax at my\n> PgCon 2020 talk, but I haven't ever received an answer. Perhaps\n> someone else knows what this kind of INTERVAL means (as a modifier of\n> a time value).\n>\n\nI think I'll open this as a separate thread, because it would simplify\nmatters if we can reject this nonsense syntax.\n\n\nThis was the alternative method of system versioning I proposed recently in\nthe system versioning thread\n\n1. The regular table remains unchanged, but a pg_class attribute named\n\"relissystemversioned\" would be set to true\n\n2. I'm unsure if the standard allows dropping a column from a table while\nit is system versioned, and the purpose behind system versioning makes me\nbelieve the answer is a strong \"no\" and requiring DROP COLUMN to fail\non relissystemversioned = 't' seems pretty straightforward.\n3. The history table would be given a default name of $FOO_history (space\npermitting), but could be overridden with the history_table option.\n4. The history table would have relkind = 'h'\n5. The history table will only have rows that are not current, so it is\ncreated empty.\n6. As such, the table is effectively append-only, in a way that vacuum can\nactually leverage, and likewise the fill factor of such a table should\nnever be less than 100.\n7. The history table could only be updated only via system defined triggers\n(insert,update,delete, alter to add columns), or row migration similar to\nthat found in partitioning. It seems like this would work as the two tables\nworking as partitions of the same table, but presently we can't have\nmulti-parent partitions.\n8. The history table would be indexed the same as the base table, except\nthat all unique indexes would be made non-unique, and an index of pk +\nstart_time + end_time would be added\n9. 
The primary key of the base table would remain the existing pk vals, and\nwould basically function normally, with triggers to carry forth changes to\nthe history table. The net effect of this is that the end_time value of all\nrows in the main table would always be the chosen \"current\" value\n(infinity, null, 9999-12-31, etc) and as such might not actually _need_ to\nbe stored.\n10. Queries that omit the FOR SYSTEM_TIME clause, as well as ones that use\nFOR SYSTEM_TIME AS OF CURRENT_TIMESTAMP, would simply use the base table\ndirectly with no quals to add.\n11. Queries that use FOR SYSTEM_TIME and not FOR SYSTEM_TIME AS\nOF CURRENT_TIMESTAMP, then the query would do a union of the base table and\nthe history table with quals applied to both.\n12. It's a fair question whether the history table would be something that\ncould be queried directly. I'm inclined to say no, because that allows for\nthings like SELECT FOR UPDATE, which of course we'd have to reject.\n13. If a history table is directly referenceable, then SELECT permission\ncan be granted or revoked as normal, but all insert/update/delete/truncate\noptions would raise an error.\n14. DROP SYSTEM VERSIONING from a table would be quite straightforward -\nthe history table would be dropped along with the triggers that reference\nit, setting relissystemversioned = 'f' on the base table.\n\n\nThe benefits to your effort here would be:\n\n1. No change to the primary key except for the ones dictated by application\nperiod\n2. The INFORMATION_SCHEMA view need merely take into account The\npg_class.relkind = 'h' entries\n3. system versioning is no longer mutating (trigger on X updates X), which\neliminates the possibility that application period triggers get into a loop\n4. 
DROP SYSTEM VERSIONING would be entirely transparent to application\nversioning.\n\nThoughts?\n\nIn IBM DB2 you can only have one because application-time periods must\nbe named \"business_time\" (not joking).I saw that as well, and it made me think that someone at IBM is a fan of Flight Of The Conchords. Personally I feel like it's a weird limitation and I wouldn't mind\nsupporting more, but my current implementation only allows for one,\nand I'd have to rethink some things to do it differently.I'm satisfied that it's not something we need to do in the first MVP. \nYes. Even though the name \"SYSTEM_TIME\" is technically enough, I'd\nstill include a pertype column to make distinguishing system vs\napplication periods easier and more obvious.SYSTEM_TIME seems to allow for DATE values in the start_time and end_time fields, though I cannot imagine how that would ever be practical, unless it were somehow desirable to reject subsequent updates within a 24 hour timeframe. I have seen instances where home-rolled application periods used date values, which had similar problems where certain intermediate updates would simply have to be discarded in favor of the one that was still standing at midnight. \n\n> 2. The system versioning effort has chosen 'infinity' as their end-time value, whereas you have chosen NULL as that makes sense for an unbounded range. Other databases seem to leverage '9999-12-31 23:59:59' (SQLServer, IIRC) whereas some others seem to used '2999-12-31 23:59:59' but those might have been home-rolled temporal implementations. To further add to the confusion, the syntax seems to specify the keyword of MAXVALUE, which further muddies things. 
The system versioning people went with 'infinity' seemingly because it prescribe and end to the world like SQLServer did, but also because it allowed for a primary key based on (id, endtime) and that's just not possible with NULL endtime values.\n\nI think it's a little weird that our system-time patch mutates your\nprimary key. None of the other RDMBSes do that. I don't think it's\nincompatible (as long as the system time patch knows how to preserve\nthe extra period/range data in an application-time temporal key), but\nit feels messy to me.Per outline below, I'm proposing an alternate SYSTEM_TIME implementation that would leave the PK as-is. I would prefer if system-time and application-time used the same value\nto mean \"unbounded\". Using null means we can support any type (not\njust types with +-Infinity). And it pairs nicely with range types. If\nthe only reason for system-time to use Infinity is the primary key, I\nthink it would be better not to mutate the primary key (and store the\nhistorical records in a separate table as other RDMSes do).The two  \"big wins\" of infinity seemed (to me) to be:1. the ability to add \"AND end_time = 'infinity'\" as a cheap way to get current rows2. clauses like \"WHERE CURRENT_DATE - 3 BETWEEN start_time AND end_time\" would work. Granted, there's very specific new syntax to do that properly, but you know somebody's gonna see the columns and try to do it that way. \n\nBtw Oracle also uses NULL to mean \"unbounded\".Huh, I missed that one. That is good in that it gives some precedence to how you've approached it. \n\nWe presently forbid PKs from including expressions, but my patch lifts\nthat exception so it can index a rangetype expression built from the\nperiod start & end columns. 
So even if we must include the system-time\nend column in a PK, perhaps it can use a COALESCE expression to store\nInfinity even while using NULL to signify \"currently true\" from a user\nperspective.Either way seems viable, but I understand why you want to leverage ranges in this way. \n\n> 3. I noticed some inconsistency in the results from various \"SELECT * FROM portion_of_test\" examples. In some, the \"valid_at\" range is shown but not columns that make it up, and in some others, the \"valid_from\" and \"valid_to\" columns are shown, with no mention of the period. From what I've seen, the period column should be invisible unless invoked, like ctid or xmin.\n\nIn most cases the tests test the same functionality with both PERIODs\nand rangetype columns. For FKs they test all four combinations of\nPERIOD-referencing-PERIOD, PERIOD-referencing-range,\nrange-referencing-PERIOD, and range-referencing-range. If valid_at is\na genuine column, it is included in SELECT *, but not if it is a\nPERIOD.Ok, I'll have to look back over the test coverage to make sure that I understand the behavior now. \n\n> 4. The syntax '2018-03-04' AT TIME ZONE INTERVAL '2'  HOUR TO MINUTE simply confounded me.\n\nMe too! I have no idea what that is supposed to mean. But that\nbehavior predates my patch. I only had to deal with it because it\ncreates a shift-reduce conflict with `FOR PORTION OF valid_at FROM x\nTO y`, where x & y are expressions. I asked about this syntax at my\nPgCon 2020 talk, but I haven't ever received an answer. Perhaps\nsomeone else knows what this kind of INTERVAL means (as a modifier of\na time value).I think I'll open this as a separate thread, because it would simplify matters if we can reject this nonsense syntax. This was the alternative method of system versioning I proposed recently in the system versioning thread1. The regular table remains unchanged, but a pg_class attribute named \"relissystemversioned\" would be set to true2. 
I'm unsure if the standard allows dropping a column from a table while it is system versioned, and the purpose behind system versioning makes me believe the answer is a strong \"no\" and requiring DROP COLUMN to fail on relissystemversioned = 't' seems pretty straightforward.3. The history table would be given a default name of $FOO_history (space permitting), but could be overridden with the history_table option.4. The history table would have relkind = 'h'5. The history table will only have rows that are not current, so it is created empty.6. As such, the table is effectively append-only, in a way that vacuum can actually leverage, and likewise the fill factor of such a table should never be less than 100.7. The history table could only be updated only via system defined triggers (insert,update,delete, alter to add columns), or row migration similar to that found in partitioning. It seems like this would work as the two tables working as partitions of the same table, but presently we can't have multi-parent partitions.8. The history table would be indexed the same as the base table, except that all unique indexes would be made non-unique, and an index of pk + start_time + end_time would be added9. The primary key of the base table would remain the existing pk vals, and would basically function normally, with triggers to carry forth changes to the history table. The net effect of this is that the end_time value of all rows in the main table would always be the chosen \"current\" value (infinity, null, 9999-12-31, etc) and as such might not actually _need_ to be stored.10. Queries that omit the FOR SYSTEM_TIME clause, as well as ones that use FOR SYSTEM_TIME AS OF CURRENT_TIMESTAMP, would simply use the base table directly with no quals to add.11. Queries that use FOR SYSTEM_TIME and not FOR SYSTEM_TIME AS OF CURRENT_TIMESTAMP, then the query would do a union of the base table and the history table with quals applied to both.12. 
It's a fair question whether the history table would be something that could be queried directly. I'm inclined to say no, because that allows for things like SELECT FOR UPDATE, which of course we'd have to reject.13. If a history table is directly referenceable, then SELECT permission can be granted or revoked as normal, but all insert/update/delete/truncate options would raise an error.14. DROP SYSTEM VERSIONING from a table would be quite straightforward - the history table would be dropped along with the triggers that reference it, setting relissystemversioned = 'f' on the base table.The benefits to your effort here would be:1. No change to the primary key except for the ones dictated by application period2. The INFORMATION_SCHEMA view need merely take into account The pg_class.relkind = 'h' entries3. system versioning is no longer mutating (trigger on X updates X), which eliminates the possibility that application period triggers get into a loop4. DROP SYSTEM VERSIONING would be entirely transparent to application versioning.Thoughts?", "msg_date": "Sat, 18 Sep 2021 20:46:16 -0400", "msg_from": "Corey Huinker <corey.huinker@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sat, Sep 18, 2021 at 5:46 PM Corey Huinker <corey.huinker@gmail.com>\nwrote:\n\n> SYSTEM_TIME seems to allow for DATE values in the start_time and end_time\n> fields,\n> though I cannot imagine how that would ever be practical, unless it were\n> somehow\n> desirable to reject subsequent updates within a 24 hour timeframe.\n>\n\nI agree that for SYSTEM_TIME it doesn't make much sense to use anything but\nthe smallest time granularity.\n\nThe two \"big wins\" of infinity seemed (to me) to be:\n>\n> 1. the ability to add \"AND end_time = 'infinity'\" as a cheap way to get\n> current rows\n> 2. clauses like \"WHERE CURRENT_DATE - 3 BETWEEN start_time AND end_time\"\n> would work.\n>\n\nYes. OTOH there is equivalent syntax for ranges, e.g. 
`valid_at @> now()`.\nBut if you had a real PERIOD then that wouldn't be available, since you\ncan't use a PERIOD as an expression. Personally I think that's a shame, and\nI wonder if PERIODs should be another kind of expression (much like a\ncolumn value) that evaluates to an equivalent range. Then you'd get all\nkinds of operators & functions that work with them, you could `SELECT`\nthem, `GROUP BY` them, pass them to functions, etc.\n\nThe spec doesn't say anything about using PERIODs in those places, but it\n*does* have a section on period *predicates*, which seem to be allowed\nanywhere you can put an expression. The spec's discussion of this is in\n4.14.2 (\"Operations involving periods\") and 8.20 (\"<period predicate>\"),\nand says there should be predicates for overlap, equals, contains,\nprecedes, succeeds, immediately precedes, and immediately succeeds. So in\nthe spec, the smallest possible \"element\" is not a bare PERIOD, but rather\nthese predicates. My patch doesn't include these (it's a lot of new\nsyntax), and no other RDBMS seems to have implemented them. I'm inclined to\njust treat PERIODs like ranges, or at least maybe let you cast from one to\nanother. (Casting is weird though since if a bare PERIOD isn't a valid\nexpression, what are you casting from/to?)\n\nI should add that using +-Infinity for application-time bounds is\ncompletely acceptable under my patch; you just have the option to use NULL\ninstead. So your examples of filtering above are fine. There aren't any\noperations where we have to set a bounded rangepart to unbounded, so we\nnever pass a NULL; only the user would do that. We do bless NULLs by\ntranslating MINVALUE/MAXVALUE to NULL, but that is necessary to support\narbitrary types. Even that could be refined so that we use +-Infinity when\navailable but NULL elsewhere. Or we could just drop MINVALUE/MAXVALUE\nentirely. 
It's my own addition to make sentinels less arbitrary; it's not\nin the standard.\n\nOne of my design goals was to let people favor ranges over PERIODs if they\nlike. Forcing people to use +-Infinity doesn't completely eliminate that\ngoal, but it does mean your ranges are different than you're used to seeing\n(`[2020-01-01, Infinity)' vs [2020-01-01,)`. More importantly you can only\nuse {date,ts,tstz}range for application-time periods, not other rangetypes.\nSo I'd prefer to keep NULL bounds *possible*, even if MINVALUE/MAXVALUE\naren't giving it a sanction.\n\nThis was the alternative method of system versioning I proposed recently in\n> the system versioning thread\n> 1. The regular table remains unchanged, but a pg_class attribute named\n> \"relissystemversioned\" would be set to true\n> 2. I'm unsure if the standard allows dropping a column from a table\n> while it is system versioned, and the purpose behind system versioning\n> makes me believe the answer is a strong \"no\" and requiring DROP COLUMN to\n> fail on relissystemversioned = 't' seems pretty straightforward.\n> 3. The history table would be given a default name of $FOO_history\n> (space permitting), but could be overridden with the history_table option.\n> 4. The history table would have relkind = 'h'\n>\n\n+1 so far. Behavior of DDL in temporal tables is almost untouched even in\nthe academic literature I've read. (My bibliography mentions a few places\nthat at least mention that it's a hard problem.) Forbidding to drop a\ncolumn seems pretty harsh---but on the other hand that's just the tip of\nthe iceberg, so failing is probably the practical choice. For example what\nhappens to old rows if you add a NOT NULL constraint? For application-time\nwe can make the user responsible for figuring out the most sensible thing,\nbut for SYSTEM_TIME we have to figure that out ourselves. But what about\ncolumn type changes, or domains? What about removing an enum option? Or\nadding a CHECK constraint? 
With SYSTEM_TIME the user is supposed to be\nunable to change the history data, so they can't accommodate it to future\nrequirements.\n\n 5. The history table will only have rows that are not current, so it is\n> created empty.\n> 6. As such, the table is effectively append-only, in a way that vacuum\n> can actually leverage, and likewise the fill factor of such a table should\n> never be less than 100.\n> 7. The history table could only be updated only via system defined\n> triggers (insert,update,delete, alter to add columns), or row migration\n> similar to that found in partitioning. It seems like this would work as the\n> two tables working as partitions of the same table, but presently we can't\n> have multi-parent partitions.\n>\n\nI don't think they should be sibling partitions, but I do think it would be\ncool if you could ask for the history table to be partitioned. Mariadb\noffers a way to do this (see my blog post comparing SQL:2011\nimplementations). It doesn't have to be in the first patch though, and it's\nnot part of the standard.\n\n 8. The history table would be indexed the same as the base table,\n> except that all unique indexes would be made non-unique, and an index of pk\n> + start_time + end_time would be added\n>\n\nIs there any value to indexing both start_time and end_time? Just one\nalready takes you to a single row.\n\nThe system-time code would need to know how to handle application-time PKs\nsince they are a little different, but that's not hard. And it still is\njust adding a column (or two if you think they should both be there).\n\nThe history table also should not have any FKs, and no FKs should reference\nit.\n\n 9. The primary key of the base table would remain the existing pk vals,\n> and would basically function normally, with triggers to carry forth changes\n> to the history table. 
The net effect of this is that the end_time value of\n> all rows in the main table would always be the chosen \"current\" value\n> (infinity, null, 9999-12-31, etc) and as such might not actually _need_ to\n> be stored.\n>\n\nInteresting thought that we wouldn't really even need to store the end\ntime. I don't have an opinion about whether the optimization is worth the\ncomplexity, but yeah it seems possible.\n\n 10. Queries that omit the FOR SYSTEM_TIME clause, as well as ones that\n> use FOR SYSTEM_TIME AS OF CURRENT_TIMESTAMP, would simply use the base\n> table directly with no quals to add.\n> 11. Queries that use FOR SYSTEM_TIME and not FOR SYSTEM_TIME AS OF\n> CURRENT_TIMESTAMP, then the query would do a union of the base table and\n> the history table with quals applied to both.\n>\n\nI like this, but it means people can't filter directly on the columns\nthemselves as you suggest above. Can we detect when they're doing that?\nKeep in mind it might be happening inside a user-defined function, etc. So\nperhaps it is safer to always use the UNION.\n\n 12. It's a fair question whether the history table would be something\n> that could be queried directly. I'm inclined to say no, because that allows\n> for things like SELECT FOR UPDATE, which of course we'd have to reject.\n> 13. If a history table is directly referenceable, then SELECT\n> permission can be granted or revoked as normal, but all\n> insert/update/delete/truncate options would raise an error.\n>\n\nIt seems to break the abstraction to let people query the history table\ndirectly. OTOH sometimes it's helpful to see behind the curtain. I could go\neither way here, but I slightly favor letting people do it.\n\n 14. 
DROP SYSTEM VERSIONING from a table would be quite straightforward\n> - the history table would be dropped along with the triggers that reference\n> it, setting relissystemversioned = 'f' on the base table.\n>\n\nI like this approach a lot, and I think it's a better design than carrying\nall the history inside the main table. I also like how bitemporal will Just\nWork^TM. One is in user-space and the other is controlled by\nPostgres---which fits the intention.\n\nYours,\nPaul\n\nOn Sat, Sep 18, 2021 at 5:46 PM Corey Huinker <corey.huinker@gmail.com> wrote:SYSTEM_TIME seems to allow for DATE values in the start_time and end_time fields,though I cannot imagine how that would ever be practical, unless it were somehowdesirable to reject subsequent updates within a 24 hour timeframe.I agree that for SYSTEM_TIME it doesn't make much sense to use anything but the smallest time granularity.The two  \"big wins\" of infinity seemed (to me) to be:1. the ability to add \"AND end_time = 'infinity'\" as a cheap way to get current rows2. clauses like \"WHERE CURRENT_DATE - 3 BETWEEN start_time AND end_time\" would work.Yes. OTOH there is equivalent syntax for ranges, e.g. `valid_at @> now()`. But if you had a real PERIOD then that wouldn't be available, since you can't use a PERIOD as an expression. Personally I think that's a shame, and I wonder if PERIODs should be another kind of expression (much like a column value) that evaluates to an equivalent range. Then you'd get all kinds of operators & functions that work with them, you could `SELECT` them, `GROUP BY` them, pass them to functions, etc.The spec doesn't say anything about using PERIODs in those places, but it *does* have a section on period *predicates*, which seem to be allowed anywhere you can put an expression. 
The spec's discussion of this is in 4.14.2 (\"Operations involving periods\") and 8.20 (\"<period predicate>\"), and says there should be predicates for overlap, equals, contains, precedes, succeeds, immediately precedes, and immediately succeeds. So in the spec, the smallest possible \"element\" is not a bare PERIOD, but rather these predicates. My patch doesn't include these (it's a lot of new syntax), and no other RDBMS seems to have implemented them. I'm inclined to just treat PERIODs like ranges, or at least maybe let you cast from one to another. (Casting is weird though since if a bare PERIOD isn't a valid expression, what are you casting from/to?)I should add that using +-Infinity for application-time bounds is completely acceptable under my patch; you just have the option to use NULL instead. So your examples of filtering above are fine. There aren't any operations where we have to set a bounded rangepart to unbounded, so we never pass a NULL; only the user would do that. We do bless NULLs by translating MINVALUE/MAXVALUE to NULL, but that is necessary to support arbitrary types. Even that could be refined so that we use +-Infinity when available but NULL elsewhere. Or we could just drop MINVALUE/MAXVALUE entirely. It's my own addition to make sentinels less arbitrary; it's not in the standard.One of my design goals was to let people favor ranges over PERIODs if they like. Forcing people to use +-Infinity doesn't completely eliminate that goal, but it does mean your ranges are different than you're used to seeing (`[2020-01-01, Infinity)' vs [2020-01-01,)`. More importantly you can only use {date,ts,tstz}range for application-time periods, not other rangetypes. So I'd prefer to keep NULL bounds *possible*, even if MINVALUE/MAXVALUE aren't giving it a sanction.This was the alternative method of system versioning I proposed recently in the system versioning thread    1. 
The regular table remains unchanged, but a pg_class attribute named \"relissystemversioned\" would be set to true    2. I'm unsure if the standard allows dropping a column from a table while it is system versioned, and the purpose behind system versioning makes me believe the answer is a strong \"no\" and requiring DROP COLUMN to fail on relissystemversioned = 't' seems pretty straightforward.    3. The history table would be given a default name of $FOO_history (space permitting), but could be overridden with the history_table option.    4. The history table would have relkind = 'h'+1 so far. Behavior of DDL in temporal tables is almost untouched even in the academic literature I've read. (My bibliography mentions a few places that at least mention that it's a hard problem.) Forbidding to drop a column seems pretty harsh---but on the other hand that's just the tip of the iceberg, so failing is probably the practical choice. For example what happens to old rows if you add a NOT NULL constraint? For application-time we can make the user responsible for figuring out the most sensible thing, but for SYSTEM_TIME we have to figure that out ourselves. But what about column type changes, or domains? What about removing an enum option? Or adding a CHECK constraint? With SYSTEM_TIME the user is supposed to be unable to change the history data, so they can't accommodate it to future requirements.    5. The history table will only have rows that are not current, so it is created empty.    6. As such, the table is effectively append-only, in a way that vacuum can actually leverage, and likewise the fill factor of such a table should never be less than 100.    7. The history table could only be updated only via system defined triggers (insert,update,delete, alter to add columns), or row migration similar to that found in partitioning. 
It seems like this would work as the two tables working as partitions of the same table, but presently we can't have multi-parent partitions.I don't think they should be sibling partitions, but I do think it would be cool if you could ask for the history table to be partitioned. Mariadb offers a way to do this (see my blog post comparing SQL:2011 implementations). It doesn't have to be in the first patch though, and it's not part of the standard.    8. The history table would be indexed the same as the base table, except that all unique indexes would be made non-unique, and an index of pk + start_time + end_time would be addedIs there any value to indexing both start_time and end_time? Just one already takes you to a single row.The system-time code would need to know how to handle application-time PKs since they are a little different, but that's not hard. And it still is just adding a column (or two if you think they should both be there).The history table also should not have any FKs, and no FKs should reference it.    9. The primary key of the base table would remain the existing pk vals, and would basically function normally, with triggers to carry forth changes to the history table. The net effect of this is that the end_time value of all rows in the main table would always be the chosen \"current\" value (infinity, null, 9999-12-31, etc) and as such might not actually _need_ to be stored.Interesting thought that we wouldn't really even need to store the end time. I don't have an opinion about whether the optimization is worth the complexity, but yeah it seems possible.    10. Queries that omit the FOR SYSTEM_TIME clause, as well as ones that use FOR SYSTEM_TIME AS OF CURRENT_TIMESTAMP, would simply use the base table directly with no quals to add.    11. 
Queries that use FOR SYSTEM_TIME and not FOR SYSTEM_TIME AS OF CURRENT_TIMESTAMP, then the query would do a union of the base table and the history table with quals applied to both.I like this, but it means people can't filter directly on the columns themselves as you suggest above. Can we detect when they're doing that? Keep in mind it might be happening inside a user-defined function, etc. So perhaps it is safer to always use the UNION.    12. It's a fair question whether the history table would be something that could be queried directly. I'm inclined to say no, because that allows for things like SELECT FOR UPDATE, which of course we'd have to reject.    13. If a history table is directly referenceable, then SELECT permission can be granted or revoked as normal, but all insert/update/delete/truncate options would raise an error.It seems to break the abstraction to let people query the history table directly. OTOH sometimes it's helpful to see behind the curtain. I could go either way here, but I slightly favor letting people do it.    14. DROP SYSTEM VERSIONING from a table would be quite straightforward - the history table would be dropped along with the triggers that reference it, setting relissystemversioned = 'f' on the base table.I like this approach a lot, and I think it's a better design than carrying all the history inside the main table. I also like how bitemporal will Just Work^TM. One is in user-space and the other is controlled by Postgres---which fits the intention.Yours,Paul", "msg_date": "Fri, 1 Oct 2021 13:47:57 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Here are some new patches rebased on the latest master.\n\nI haven't made any substantive changes, but I should have time soon to \ntake a stab at supporting partitioned tables and removing some of my own \nTODOs (things like making sure I'm locking things correctly). 
I don't \nthink there is any outstanding feedback other than that.\n\nBut in the meantime here are some up-to-date patches.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Tue, 16 Nov 2021 15:55:26 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, Nov 16, 2021 at 3:55 PM Paul Jungwirth <pj@illuminatedcomputing.com>\nwrote:\n\n> I haven't made any substantive changes, but I should have time soon to\n> take a stab at supporting partitioned tables and removing some of my own\n> TODOs (things like making sure I'm locking things correctly).\n>\n\nHello,\n\nHere are updated patches. They are rebased and clean up some of my TODOs.\nHere is what remains:\n\n- Various TODOs asking for advice about concurrency things: where to lock,\nwhen to copy structs, etc. I'd appreciate some review on these from someone\nmore experienced than me.\n\n- Supporting FOR PORTION OF against updateable views. I'll keep working on\nthis, but I thought there was enough progress to pass along new patches in\nthe meantime.\n\n- Support partitioned tables. I think this is a medium-size effort, and I'm\nnot sure whether it's really needed for pg 15 or something we can add\nlater. I'm going to do my best to get it done though. (I should have more\ntime for this project now: having a sixth baby recently made side projects\nchallenging for a while, but lately things have been getting easier.)\nPartitioning could use some design discussion though, both for application\ntime alone and for bitemporal tables (so overlapping with the system time\nwork). Here are some thoughts so far:\n\n - Creating a PERIOD on a partitioned table should automatically create\nthe PERIOD (and associated constraints) on the child tables. 
This one seems\neasy and I'll try to get it done soon.\n\n - Sort of related, but not strictly partitioning: CREATE TABLE LIKE\nshould have a new INCLUDING PERIODS option. (I'm tempted to include this\nunder INCLUDING CONSTRAINTS, but I think a separate option is nicer since\nit gives more control.)\n\n - If you partition by something in the scalar part of the temporal PK,\nthat's easy. I don't think we have to do anything special there. I'd like\nto add some tests about it though.\n\n - We should allow temporal primary keys on the top-level partitioned\ntable, even though they are essentially exclusion constraints. Whereas in\nthe general case an exclusion constraint cannot prove its validity across\nall the tables, a temporal PK *can* prove its validity so long the\npartition key includes at least one scalar part of the temporal PK (so that\nall records for one \"entity\" get routed to the same table).\n\n - If you partition by the temporal part of the temporal PK, things are\nharder. I'm inclined to forbid this, at least for v15. Suppose you\npartition by the start time. Then you wind up with the same entity spread\nacross several tables, so you can't validate the overall exclusion\nconstraint anymore.\n\n - OTOH you *could* partition by application-time itself (not start time\nalone nor end time alone) where each partition has application-time\nranges/periods that are trimmed to fit within that partition's limits. Then\nsince each partition is responsible for a non-overlapping time period, you\ncould validate the overall exclusion constraint. You'd just have to add\nsome logic to tuple re-routing that could transform single records into\nmultiple records. For example if each partition holds a different year and\nyou INSERT a record that is valid for a decade, you'd have to insert one\nrow into ten partitions, and change the application-time range/period of\neach row appropriately. This is a special kind of range partitioning. 
I\ndon't have any ideas how to make hash or list partitioning work on the\ntemporal part of the PK. I don't think we should allow it.\n\n - Partitioning by application time requires no special syntax.\nPartitioning by system time (if that's desired) would probably require\nextra (non-standard) syntax. Mariadb has this:\nhttps://mariadb.com/kb/en/system-versioned-tables/#storing-the-history-separately\nPerhaps that is orthogonal to application-time partitioning though. It\nsounds like people think we should store non-current system time in a\nseparate table (I agree), and in that case I think a bitemporal table that\nis partitioned by scalar keys or application-time would just have a\nseparate system-time history table for each partition, and that would Just\nWork. And if we *do* want to partition by system time too, then it would be\ntransparent to the application-time logic.\n\n - Since system time doesn't add anything to your PK (or at least it\nshouldn't), there is no extra complexity around dealing with exclusion\nconstraints. We should just guarantee that all *current* rows land in the\nsame partition, because for a bitemporal table that's the only one that\nneeds a temporal PK. I guess that means you could partition by end\nsystem-time but not start system-time. This would be an exception to the\nrule that a PK must include the partition keys. Instead we'd say that all\ncurrent (i.e. non-historical) records stay together (at the system-time\nlevel of partitioning).\n\n - I don't think system-time partitioning needs to be in v15. It seems\nmore complicated than ordinary partitioning.\n\nYours,\nPaul", "msg_date": "Sat, 20 Nov 2021 17:51:16 -0800", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 21.11.21 02:51, Paul A Jungwirth wrote:\n> Here are updated patches. They are rebased and clean up some of my \n> TODOs.\n\nThis patch set looks very interesting. 
It's also very big, so it's\ndifficult to see how to get a handle on it. I did a pass through it\nto see if there were any obvious architectural or coding style\nproblems. I also looked at some of your TODO comments to see if I had\nsomething to contribute there.\n\nI'm confused about how to query tables based on application time\nperiods. Online, I see examples using AS OF, but in the SQL standard\nI only see this used for system time, which we are not doing here.\nWhat is your understanding of that?\n\n\nv10-0001-Add-PERIODs.patch\n\nsrc/backend/commands/tablecmds.c\n\nMight be worth explaining somewhere why AT_PASS_ADD_PERIOD needs to be\nits own pass. -- Ah, this is explained in ATPrepCmd(). Maybe that is\nokay, but I would tend to prefer a comprehensive explanation here\nrather than sprinkled around.\n\nmake_period_not_backward(): Hardcoding the name of the operator as \"<\"\nis not good. You should perhaps lookup the less-than operator in the\ntype cache. Look around for TYPECACHE_LT_OPR for how this is usually done.\n\nvalidate_period(): Could use an explanatory comment. There are a\nbunch of output arguments, and it's not clear what all of this is\nsupposed to do, and what \"validating\" is in this context.\n\nMergeAttributes(): I would perhaps initially just prohibit inheritance\nsituations that involve periods on either side. (It should work for\npartitioning, IMO, but that should be easier to arrange.)\n\nAlterTableGetLockLevel(): The choice of AccessExclusiveLock looks\ncorrect. I think the whole thing can also be grouped with some of the\nother \"affects concurrent SELECTs\" cases?\n\nMaybe the node type Period could have a slightly more specific name,\nperhaps PeriodDef, analogous to ColumnDef?\n\nI didn't follow why indexes would have periods, for example, the new\nperiod field in IndexStmt. 
Is that explained anywhere?\n\nWhile reading this patch I kept wondering whether it would be possible\nto fold periods into pg_attribute, perhaps with negative attribute\nnumbers. Have you looked into something like that? No doubt it's\nalso complicated, but it might simplify some things, like the name\nconflict checking.\n\n\nv10-0002-Add-temporal-PRIMARY-KEY-and-UNIQUE-constraints.patch\n\nsrc/backend/catalog/Catalog.pm: I see you use this change in the\nsubsequent patches, but I would recommend skipping all this. The\ncomments added are kind of redundant with the descr fields anyway.\n\ntransformIndexConstraint(): As above, we can't look up the && operator\nby name. In this case, I suppose we should look it up through the\nindex AM support operators.\n\nFurther, the additions to this function are very complicated and not\nfully explained. I'm suspicious about things like\nfindNewOrOldColumn() -- generally we should look up columns by number\nnot name. Perhaps you can add a header comment or split out the code\nfurther into smaller functions.\n\npg_dump.c getIndexes() has been refactored since to make\nversion-specific additions easier. But your patch is now failing to\napply because of this.\n\nOf course, the main problem in this patch is that for most uses it\nrequires btree_gist. I think we should consider moving that into\ncore, or at least the support for types that are most relevant to this\nfunctionality, specifically the date/time types. Aside from user\nconvenience, this would also allow writing more realistic test cases.\n\n\nv10-0003-Add-UPDATE-DELETE-FOR-PORTION-OF.patch\n\nUse of MINVALUE and MAXVALUE for unbounded seems problematic to me.\n(If it is some value, it is not really larger than any value.) We\nhave the keyword UNBOUNDED, which seems better suited.\n\nsrc/backend/access/brin/brin_minmax_multi.c\n\nThese renaming changes seem unrelated (but still seem like a good\nidea). 
Should they be progressed separately?\n\nAgain, some hardcoded operator name lookup in this patch.\n\nI don't understand why a temporal primary key is required for doing\nUPDATE FOR PORTION OF. I don't see this in the standard.\n\n\nv10-0004-Add-temporal-FOREIGN-KEYs.patch\n\nDo we really need different trigger names depending on whether the\nforeign key is temporal?\n\nrange_as_string() doesn't appear to be used anywhere.\n\nI ran out of steam on this patch, it's very big. But it seems sound\nin general.\n\n\nHow to proceed. I suppose we could focus on committing 0001 and 0002\nfirst. That would be a sensible feature set even if the remaining\npatches did not make a release. I do feel we need to get btree_gist\ninto core. That might be a big job by itself. I'm also bemused why\nbtree_gist is so bloated compared to btree_gin. btree_gin uses macros\nto eliminate duplicate code where btree_gist is full of\ncopy-and-paste. So there are some opportunities there to make things\nmore compact. Is there anything else you think we can do as\npreparatory work to make the main patches more manageable?\n\n\n", "msg_date": "Wed, 5 Jan 2022 17:07:53 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, Jan 5, 2022 at 11:07 AM Peter Eisentraut <\npeter.eisentraut@enterprisedb.com> wrote:\n\n> On 21.11.21 02:51, Paul A Jungwirth wrote:\n> > Here are updated patches. They are rebased and clean up some of my\n> > TODOs.\n>\n> This patch set looks very interesting. It's also very big, so it's\n> difficult to see how to get a handle on it. I did a pass through it\n> to see if there were any obvious architectural or coding style\n> problems. I also looked at some of your TODO comments to see if I had\n> something to contribute there.\n>\n> I'm confused about how to query tables based on application time\n> periods. 
Online, I see examples using AS OF, but in the SQL standard\n> I only see this used for system time, which we are not doing here.\n> What is your understanding of that?\n>\n\nPaul has previously supplied me with this document\nhttps://cs.ulb.ac.be/public/_media/teaching/infoh415/tempfeaturessql2011.pdf\nand that formed the basis of a lot of my questions a few months earlier.\n\nThere was similar work being done for system periods, which are a bit\nsimpler but require a side (history) table to be created. I was picking\npeople's brains about some aspects of system versioning to see if I could\nhelp bringing that into this already very large patchset, but haven't yet\nfelt like I had done enough research to post it.\n\nIt is my hope that we can at least get the syntax for both application and\nsystem versioning committed, even if it's just stubbed in with\nnot-yet-supported errors.\n\nOn Wed, Jan 5, 2022 at 11:07 AM Peter Eisentraut <peter.eisentraut@enterprisedb.com> wrote:On 21.11.21 02:51, Paul A Jungwirth wrote:\n> Here are updated patches. They are rebased and clean up some of my \n> TODOs.\n\nThis patch set looks very interesting.  It's also very big, so it's\ndifficult to see how to get a handle on it.  I did a pass through it\nto see if there were any obvious architectural or coding style\nproblems.  I also looked at some of your TODO comments to see if I had\nsomething to contribute there.\n\nI'm confused about how to query tables based on application time\nperiods.  Online, I see examples using AS OF, but in the SQL standard\nI only see this used for system time, which we are not doing here.\nWhat is your understanding of that?Paul has previously supplied me with this document https://cs.ulb.ac.be/public/_media/teaching/infoh415/tempfeaturessql2011.pdf and that formed the basis of a lot of my questions a few months earlier.There was similar work being done for system periods, which are a bit simpler but require a side (history) table to be created. 
I was picking people's brains about some aspects of system versioning to see if I could help bringing that into this already very large patchset, but haven't yet felt like I had done enough research to post it.It is my hope that we can at least get the syntax for both application and system versioning committed, even if it's just stubbed in with not-yet-supported errors.", "msg_date": "Wed, 5 Jan 2022 17:03:23 -0500", "msg_from": "Corey Huinker <corey.huinker@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, Jan 5, 2022 at 8:07 AM Peter Eisentraut\n<peter.eisentraut@enterprisedb.com> wrote:\n>\n> This patch set looks very interesting.\n\nThank you for the review!\n\nI'll work on your feedback but in the meantime here are replies to\nyour questions:\n\n> I'm confused about how to query tables based on application time\n> periods. Online, I see examples using AS OF, but in the SQL standard\n> I only see this used for system time, which we are not doing here.\n\nCorrect, the standard only gives it for system time. I think\napplication time is intended to be more \"in user space\" so it's fine\nto use regular operators in your WHERE condition against the time\ncolumns, whereas system time is more of a managed thing---automatic,\nread-only, possibly stored in a separate table. Having a special\nsyntax cue lets the RDBMS know it needs to involve the historical\nrecords.\n\n> validate_period(): Could use an explanatory comment. There are a\n> bunch of output arguments, and it's not clear what all of this is\n> supposed to do, and what \"validating\" is in this context.\n\nI'm not too happy with that function, but a previous reviewer asked me\nto factor out what was shared between the CREATE TABLE and ALTER TABLE\ncases. It does some sanity checks on the columns you've chosen, and\nalong the way it collects info about those columns that we'll need\nlater. But yeah all those out parameters are pretty ugly. 
I'll see if\nI can come up with a stronger abstraction for it, and at the very\nleast I'll add some comments.\n\n> MergeAttributes(): I would perhaps initially just prohibit inheritance\n> situations that involve periods on either side. (It should work for\n> partitioning, IMO, but that should be easier to arrange.)\n\nOkay. I'm glad to hear you think partitioning won't be too hard. It is\none of the last things, but to me it's a bit intimidating.\n\n> I didn't follow why indexes would have periods, for example, the new\n> period field in IndexStmt. Is that explained anywhere?\n\nWhen you create a primary key or a unique constraint (which are backed\nby a unique index), you can give a period name to make it a temporal\nconstraint. We create the index first and then create the constraint\nas a side-effect of that (e.g. index_create calls\nindex_constraint_create). The analysis phase generates an IndexStmt.\nSo I think this was mostly a way to pass the period info down to the\nconstraint. It probably doesn't actually need to be stored on pg_index\nthough. Maybe it does for index_concurrently_create_copy. I'll add\nsome comments, but if you think it's the wrong approach let me know.\n\n> While reading this patch I kept wondering whether it would be possible\n> to fold periods into pg_attribute, perhaps with negative attribute\n> numbers. Have you looked into something like that? No doubt it's\n> also complicated, but it might simplify some things, like the name\n> conflict checking.\n\nHmm, I thought that sort of thing would be frowned upon. :-) But also\nit seems like periods really do have a bunch of details they need\nbeyond what other attributes have (e.g. the two source attributes, the\nmatching range type, the period type (application-vs-system), maybe\nsome extra things for table inheritance.\n\nAlso are you sure we aren't already using negative attnums somewhere\nalready? 
I thought I saw something like that.\n\n> Of course, the main problem in this patch is that for most uses it\n> requires btree_gist. I think we should consider moving that into\n> core, or at least the support for types that are most relevant to this\n> functionality, specifically the date/time types. Aside from user\n> convenience, this would also allow writing more realistic test cases.\n\nI think this would be great too. How realistic do you think it is? I\nfigured since exclusion constraints are also pretty useless without\nbtree_gist, it wasn't asking too much to have people install the\nextension, but still it'd be better if it were all built in.\n\n> src/backend/access/brin/brin_minmax_multi.c\n>\n> These renaming changes seem unrelated (but still seem like a good\n> idea). Should they be progressed separately?\n\nI can pull this out into a separate patch. I needed to do it because\nwhen I added an `#include <rangetypes.h>` somewhere, these conflicted\nwith the range_{de,}serialize functions declared there.\n\n> I don't understand why a temporal primary key is required for doing\n> UPDATE FOR PORTION OF. I don't see this in the standard.\n\nYou're right, it's not in the standard. I'm doing that because\ncreating the PK is when we add the triggers to implement UPDATE FOR\nPORTION OF. I thought it was acceptable since we also require a\nPK/unique constraint as the referent of a foreign key. But we could\navoid it if I went back to the executor-based FOR PORTION OF\nimplementation, since that doesn't depend on triggers. What do you\nthink?\n\nAlso: I noticed recently that you can't use FOR PORTION OF against an\nupdatable view. I'm working on a new patch set to fix that. But the\nmain reason is this PK check. So that's maybe another reason to go\nback to the executor implementation.\n\n> How to proceed. I suppose we could focus on committing 0001 and 0002\n> first.\n\nThat would be great! 
I don't think either is likely to conflict with\nfuture system-time work.\n\n> Is there anything else you think we can do as\n> preparatory work to make the main patches more manageable?\n\nI think it would be smart to have a rough plan for how this work will\nbe compatible with system-time support. Corey & I have talked about\nthat a lot, and In general they are orthogonal, but it would be nice\nto have details written down somewhere.\n\nYours,\nPaul\n\n\n", "msg_date": "Wed, 5 Jan 2022 21:44:54 -0800", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 1/5/22 11:03 PM, Corey Huinker wrote:\n> \n> There was similar work being done for system periods, which are a bit\n> simpler but require a side (history) table to be created.\n\nThis is false. SYSTEM_TIME periods do not need any kind of history.\nThis was one of the problems I had with Surafel's attempt because it was\nconfusing the period with SYSTEM VERSIONING. Versioning needs the\nperiod but the inverse is not true.\n-- \nVik Fearing\n\n\n", "msg_date": "Thu, 6 Jan 2022 15:44:58 +0100", "msg_from": "Vik Fearing <vik@postgresfriends.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, Jan 6, 2022 at 6:45 AM Vik Fearing <vik@postgresfriends.org> wrote:\n>\n> On 1/5/22 11:03 PM, Corey Huinker wrote:\n> >\n> > There was similar work being done for system periods, which are a bit\n> > simpler but require a side (history) table to be created.\n>\n> This is false. SYSTEM_TIME periods do not need any kind of history.\n> This was one of the problems I had with Surafel's attempt because it was\n> confusing the period with SYSTEM VERSIONING. Versioning needs the\n> period but the inverse is not true.\n\nThis is an interesting point. 
Syntactically, there are three different\nthings: the generated started/end columns, the period declaration, and\nthe WITH SYSTEM VERSIONING modifier to the table. You could declare a\nsystem period without making the table versioned. Practically speaking\nI don't know why you'd ever create a system period without a versioned\ntable (do you know of any uses Vik?), but perhaps we can exploit the\nseparation to add system periods in the same patch that adds\napplication periods.\n\nThe first two bits of syntax *are* tied together: you need columns\nwith GENERATED ALWAYS AS ROW START/END to declare the system period,\nand less intuitively the standard says you can't use AS ROW START/END\nunless those columns appear in a system period (2.e.v.2 under Part 2:\nFoundation, 11.3 <table definition>). Personally I'd be willing to\nignore that latter requirement. For one thing, what does Postgres do\nwith the columns if you drop the period? Dropping the columns\naltogether seems very harsh, so I guess you'd just remove the\nGENERATED clause.\n\nAnother weird thing is that you don't (can't) say STORED for those\ncolumns. But they are certainly stored somewhere. I would store the\nvalues just like any other column (even if non-current rows get moved\nto a separate table). Also then you don't have to do anything extra\nwhen the GENERATED clause is dropped.\n\nIf we wanted to support system-time periods without building all of\nsystem versioning, what would that look like? At first I thought it\nwould be a trivial addition to part-1 of the patch here, but the more\nI think about it the more it seems to deserve its own patch.\n\nOne rule I think we should follow is that using a non-system-versioned\ntable (with a system period) should get you to the same place as using\na system-versioned table and then removing the system versioning. 
But\nthe standard says that dropping system versioning should automatically\ndrop all historical records (2 under Part 2: Foundation, 11.30 <drop\nsystem versioning clause>). That actually makes sense though: when you\ndo DML we automatically update the start/end columns, but we don't\nsave copies of the previous data (and incidentally the end column will\nalways be the max value.) So there is a use case, albeit a thin one:\nyou get a Rails-like updated_at column that is maintained\nautomatically by your RDBMS. That is pretty easy, but I think I'd\nstill break it out into a separate patch. I'm happy to work on that as\nsomething that builds on top of my part-1 patch here.\n\nYours,\nPaul\n\n\n", "msg_date": "Thu, 6 Jan 2022 10:08:54 -0800", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": ">\n>\n> But\n> the standard says that dropping system versioning should automatically\n> drop all historical records (2 under Part 2: Foundation, 11.30 <drop\n> system versioning clause>). That actually makes sense though: when you\n> do DML we automatically update the start/end columns, but we don't\n> save copies of the previous data (and incidentally the end column will\n> always be the max value.)\n\n\nThis is what I was referring to when I mentioned a side-table.\ndeleting history would be an O(1) operation. Any other\nmisunderstandings are all mine.\n\n But\nthe standard says that dropping system versioning should automatically\ndrop all historical records (2 under Part 2: Foundation, 11.30 <drop\nsystem versioning clause>). That actually makes sense though: when you\ndo DML we automatically update the start/end columns, but we don't\nsave copies of the previous data (and incidentally the end column will\nalways be the max value.) This is what I was referring to when I mentioned a side-table. deleting history would be an O(1) operation. 
Any other misunderstandings are all mine.", "msg_date": "Thu, 6 Jan 2022 13:52:30 -0500", "msg_from": "Corey Huinker <corey.huinker@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "\nOn 06.01.22 06:44, Paul A Jungwirth wrote:\n>> I didn't follow why indexes would have periods, for example, the new\n>> period field in IndexStmt. Is that explained anywhere?\n> \n> When you create a primary key or a unique constraint (which are backed\n> by a unique index), you can give a period name to make it a temporal\n> constraint. We create the index first and then create the constraint\n> as a side-effect of that (e.g. index_create calls\n> index_constraint_create). The analysis phase generates an IndexStmt.\n> So I think this was mostly a way to pass the period info down to the\n> constraint. It probably doesn't actually need to be stored on pg_index\n> though. Maybe it does for index_concurrently_create_copy. I'll add\n> some comments, but if you think it's the wrong approach let me know.\n\nThis seems backwards. Currently, when you create a constraint, the \nindex is created as a side effect and is owned, so to speak, by the \nconstraint. What you are describing here sounds like the index owns the \nconstraint. This needs to be reconsidered, I think.\n\n>> Of course, the main problem in this patch is that for most uses it\n>> requires btree_gist. I think we should consider moving that into\n>> core, or at least the support for types that are most relevant to this\n>> functionality, specifically the date/time types. Aside from user\n>> convenience, this would also allow writing more realistic test cases.\n> \n> I think this would be great too. How realistic do you think it is? 
I\n> figured since exclusion constraints are also pretty useless without\n> btree_gist, it wasn't asking too much to have people install the\n> extension, but still it'd be better if it were all built in.\n\nIMO, if this temporal feature is to happen, btree_gist needs to be moved \ninto core first. Having to install an extension in order to use an \nin-core feature like this isn't going to be an acceptable experience.\n\n>> src/backend/access/brin/brin_minmax_multi.c\n>>\n>> These renaming changes seem unrelated (but still seem like a good\n>> idea). Should they be progressed separately?\n> \n> I can pull this out into a separate patch. I needed to do it because\n> when I added an `#include <rangetypes.h>` somewhere, these conflicted\n> with the range_{de,}serialize functions declared there.\n\nOK, I have committed this separately.\n\n>> I don't understand why a temporal primary key is required for doing\n>> UPDATE FOR PORTION OF. I don't see this in the standard.\n> \n> You're right, it's not in the standard. I'm doing that because\n> creating the PK is when we add the triggers to implement UPDATE FOR\n> PORTION OF. I thought it was acceptable since we also require a\n> PK/unique constraint as the referent of a foreign key.\n\nThat part *is* in the standard.\n\n> But we could\n> avoid it if I went back to the executor-based FOR PORTION OF\n> implementation, since that doesn't depend on triggers. What do you\n> think?\n\nI think it's worth trying to do this without triggers.\n\nBut if you are just looking for a way to create the triggers, why are \nthey not just created when the table is created?\n\n> I think it would be smart to have a rough plan for how this work will\n> be compatible with system-time support. Corey & I have talked about\n> that a lot, and In general they are orthogonal, but it would be nice\n> to have details written down somewhere.\n\nI personally don't see why we need to worry about system time now. 
\nSystem time seems quite a complicated feature, since you have to figure \nout a system to store and clean the old data, whereas this application \ntime feature is ultimately mostly syntax sugar around ranges and \nexclusion constraints. As long as we keep the standard syntax for \nsystem time available for future use (which is what your patch does), I \ndon't see a need to go deeper right now.\n\n\n", "msg_date": "Mon, 10 Jan 2022 09:53:48 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hi,\n\nOn Sat, Nov 20, 2021 at 05:51:16PM -0800, Paul A Jungwirth wrote:\n> \n> Here are updated patches. They are rebased and clean up some of my TODOs.\n\nThe cfbot reports that the patchset doesn't apply anymore:\nhttp://cfbot.cputube.org/patch_36_2048.log\n=== Applying patches on top of PostgreSQL commit ID 5513dc6a304d8bda114004a3b906cc6fde5d6274 ===\n=== applying patch ./v10-0001-Add-PERIODs.patch\npatching file src/backend/commands/tablecmds.c\nHunk #1 FAILED at 40.\n[...]\n1 out of 21 hunks FAILED -- saving rejects to file src/backend/commands/tablecmds.c.rej\npatching file src/bin/pg_dump/pg_dump.c\nHunk #1 succeeded at 5906 with fuzz 2 (offset -454 lines).\nHunk #2 FAILED at 6425.\nHunk #3 succeeded at 6121 with fuzz 2 (offset -566 lines).\nHunk #4 succeeded at 6203 (offset -561 lines).\nHunk #5 succeeded at 8015 with fuzz 2 (offset -539 lines).\nHunk #6 FAILED at 8862.\nHunk #7 FAILED at 8875.\nHunk #8 FAILED at 8917.\n[...]\n4 out of 15 hunks FAILED -- saving rejects to file src/bin/pg_dump/pg_dump.c.rej\npatching file src/bin/pg_dump/pg_dump.h\nHunk #2 FAILED at 284.\nHunk #3 FAILED at 329.\nHunk #4 succeeded at 484 (offset 15 lines).\n2 out of 4 hunks FAILED -- saving rejects to file src/bin/pg_dump/pg_dump.h.rej\n\nI also see that there were multiple reviews with unanswered comments, so I will\nswitch the cf entry to Waiting on Author.\n\n\n", 
"msg_date": "Sat, 15 Jan 2022 13:58:49 +0800", "msg_from": "Julien Rouhaud <rjuju123@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 10.01.22 09:53, Peter Eisentraut wrote:\n>>> Of course, the main problem in this patch is that for most uses it\n>>> requires btree_gist.  I think we should consider moving that into\n>>> core, or at least the support for types that are most relevant to this\n>>> functionality, specifically the date/time types.  Aside from user\n>>> convenience, this would also allow writing more realistic test cases.\n>>\n>> I think this would be great too. How realistic do you think it is? I\n>> figured since exclusion constraints are also pretty useless without\n>> btree_gist, it wasn't asking too much to have people install the\n>> extension, but still it'd be better if it were all built in.\n> \n> IMO, if this temporal feature is to happen, btree_gist needs to be moved \n> into core first.  Having to install an extension in order to use an \n> in-core feature like this isn't going to be an acceptable experience.\n\nI have started a separate thread about this question.\n\n\n", "msg_date": "Wed, 19 Jan 2022 09:32:36 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hello,\n\nThank you again for the review. Here is a patch with most of your \nfeedback addressed. Sorry it has taken so long! These patches are \nrebased up to 1ab763fc22adc88e5d779817e7b42b25a9dd7c9e\n(May 3).\n\nThe big change is switching from implementing FOR PORTION OF with \ntriggers back to an executor node implementation. I think this is a lot \nsimpler and means we don't have to be so \"premeditated\" (for example you \njust need a PERIOD/range, not a temporal PK).\n\nI've also made some progress on partitioning temporal tables. 
It still \nneeds some work though, and also it depends on my separate commitfest \nentry (https://commitfest.postgresql.org/43/4065/). So I've left it out \nof the patches attached here.\n\nA few more details below:\n\nBack in January 2022, Peter Eisentraut wrote:\n> make_period_not_backward(): Hardcoding the name of the operator as \"<\"\n> is not good. You should perhaps lookup the less-than operator in the\n> type cache. Look around for TYPECACHE_LT_OPR for how this is usually done. \n> ...\n> transformIndexConstraint(): As above, we can't look up the && operator\n> by name. In this case, I suppose we should look it up through the\n> index AM support operators.\n\nI've changed most locations to look up the operators we need using \nstrategy number. But in some places I need the range intersects operator \n(`*`) and we don't have a strategy number for that. I don't really \nunderstand the purpose of not hardcoding operator names here. Can you \ngive me the reasons for that? Do you have any suggestions what I can do \nto use `*`? Also, when I'm doing these operator lookups, do I need \npermission checks similar to what I see in ComputeIndexAttrs?\n\n> Further, the additions to this function are very complicated and not\n> fully explained. I'm suspicious about things like\n> findNewOrOldColumn() -- generally we should look up columns by number\n> not name. Perhaps you can add a header comment or split out the code\n> further into smaller functions.\n\nI still have some work to do on this. I agree it's very complicated, so \nI'm going to see what kind of refactoring I can do.\n\n>>> I didn't follow why indexes would have periods, for example, the new\n>>> period field in IndexStmt. Is that explained anywhere?\n>>\n>> When you create a primary key or a unique constraint (which are backed\n>> by a unique index), you can give a period name to make it a temporal\n>> constraint. We create the index first and then create the constraint\n>> as a side-effect of that (e.g. 
index_create calls\n>> index_constraint_create). The analysis phase generates an IndexStmt.\n>> So I think this was mostly a way to pass the period info down to the\n>> constraint. It probably doesn't actually need to be stored on pg_index\n>> though. Maybe it does for index_concurrently_create_copy. I'll add\n>> some comments, but if you think it's the wrong approach let me know.\n> \n> This seems backwards. Currently, when you create a constraint, the index is created as a side effect and is owned, so to speak, by the constraint. What you are describing here sounds like the index owns the constraint. This needs to be reconsidered, I think.\n\nAfter looking at this again I do think to reference the period from the \nindex, not vice versa. The period is basically one of the index elements \n(e.g. `PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)`). You can define a \n`PERIOD` without an index, but you can't define a WITHOUT OVERLAPS index \nwithout a period. In addition you could have multiple indexes using the \nsame period (though this is probably unusual and technically disallowed \nby the standard, although in principal you could do it), but not \nmultiple periods within the same index. I understand what you're saying \nabout how constraints cause indexes as a by-product, but here the \nconstraint isn't the PERIOD; it's the PRIMARY KEY or UNIQUE constraint. \nThe PERIOD is just something the constraint & index refer to (like an \nexpression indexElem). The dependency direction also suggests the period \nshould be referenced by the index: you can drop the index without \ndropping the period, but dropping the period would cascade to dropping \nthe index (or fail). I hope that makes sense. But let me know if you \nstill disagree.\n\n> Do we really need different trigger names depending on whether the\n> foreign key is temporal? \n\nThey don't have to be different. 
I used separate C functions because I \ndidn't want standard FKs to be slowed/complicated by the temporal ones, \nand also I wanted to avoid merge conflicts with the work on avoiding SPI \nin RI checks. But you're just asking about the trigger names, right? I \nhaven't changed those yet but it shouldn't take long.\n\n> IMO, if this temporal feature is to happen, btree_gist needs to be moved \n> into core first.  Having to install an extension in order to use an \n> in-core feature like this isn't going to be an acceptable experience.\n\nAs far as I can tell the conversation about moving this into core hasn't \ngone anywhere. Do you still think this is a prerequisite to this patch? \nIs there anything I can do to help move `btree_gist` forward? It seems \nlike a large backwards compatibility challenge. I imagine that getting \nagreement on how to approach it is actually more work than doing the \ndevelopment. I'd be very happy for any suggestions here!\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Wed, 3 May 2023 14:02:47 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 03.05.23 23:02, Paul Jungwirth wrote:\n> Thank you again for the review. Here is a patch with most of your \n> feedback addressed. Sorry it has taken so long! These patches are \n> rebased up to 1ab763fc22adc88e5d779817e7b42b25a9dd7c9e\n> (May 3).\n\nHere are a few small fixup patches to get your patch set compiling cleanly.\n\nAlso, it looks like the patches 0002, 0003, and 0004 are not split up \ncorrectly. 
0002 contains tests using the FOR PORTION OF syntax \nintroduced in 0003, and 0003 uses the function build_period_range() from \n0004.", "msg_date": "Mon, 8 May 2023 09:10:09 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "> On 8 May 2023, at 09:10, Peter Eisentraut <peter.eisentraut@enterprisedb.com> wrote:\n> \n> On 03.05.23 23:02, Paul Jungwirth wrote:\n>> Thank you again for the review. Here is a patch with most of your feedback addressed. Sorry it has taken so long! These patches are rebased up to 1ab763fc22adc88e5d779817e7b42b25a9dd7c9e\n>> (May 3).\n> \n> Here are a few small fixup patches to get your patch set compiling cleanly.\n> \n> Also, it looks like the patches 0002, 0003, and 0004 are not split up correctly. 0002 contains tests using the FOR PORTION OF syntax introduced in 0003, and 0003 uses the function build_period_range() from 0004.\n\nThese patches no longer apply without a new rebase. Should this patch be\nclosed in while waiting for the prequisite of adding btree_gist to core\nmentioned upthread? I see no patch registered in the CF for this unless I'm\nmissing sometihng.\n\n--\nDaniel Gustafsson\n\n\n\n", "msg_date": "Tue, 4 Jul 2023 14:48:48 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 04.07.23 14:48, Daniel Gustafsson wrote:\n>> On 8 May 2023, at 09:10, Peter Eisentraut <peter.eisentraut@enterprisedb.com> wrote:\n>>\n>> On 03.05.23 23:02, Paul Jungwirth wrote:\n>>> Thank you again for the review. Here is a patch with most of your feedback addressed. Sorry it has taken so long! 
These patches are rebased up to 1ab763fc22adc88e5d779817e7b42b25a9dd7c9e\n>>> (May 3).\n>>\n>> Here are a few small fixup patches to get your patch set compiling cleanly.\n>>\n>> Also, it looks like the patches 0002, 0003, and 0004 are not split up correctly. 0002 contains tests using the FOR PORTION OF syntax introduced in 0003, and 0003 uses the function build_period_range() from 0004.\n> \n> These patches no longer apply without a new rebase. Should this patch be\n> closed in while waiting for the prequisite of adding btree_gist to core\n> mentioned upthread? I see no patch registered in the CF for this unless I'm\n> missing sometihng.\n\nI had talked to Paul about this offline a while ago. btree_gist to core \nis no longer considered a prerequisite. But Paul was planning to \nproduce a new patch set that is arranged and sequenced a bit \ndifferently. Apparently, that new version is not done yet, so it would \nmake sense to either close this entry as returned with feedback, or move \nit to the next commit fest as waiting on author.\n\n\n\n", "msg_date": "Thu, 6 Jul 2023 10:12:57 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "> On 6 Jul 2023, at 10:12, Peter Eisentraut <peter.eisentraut@enterprisedb.com> wrote:\n\n> it would make sense to either close this entry as returned with feedback, or move it to the next commit fest as waiting on author.\n\nFair enough, done.\n\n--\nDaniel Gustafsson\n\n\n\n", "msg_date": "Thu, 6 Jul 2023 10:24:24 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, Jul 6, 2023 at 1:13 AM Peter Eisentraut\n<peter.eisentraut@enterprisedb.com> wrote:\n>\n> I had talked to Paul about this offline a while ago. btree_gist to core\n> is no longer considered a prerequisite. 
But Paul was planning to\n> produce a new patch set that is arranged and sequenced a bit\n> differently. Apparently, that new version is not done yet, so it would\n> make sense to either close this entry as returned with feedback, or move\n> it to the next commit fest as waiting on author.\n\nHere are some new patch files based on discussions from PGCon. The\npatches are reorganized a bit to hopefully make them easier to review:\n\nInitially I implement all functionality on just range columns, without\nsupporting PERIODs yet. There are patches for temporal PRIMARY\nKEY/UNIQUE constraints, for simple foreign keys (without CASCADE/SET\nNULL/SET DEFAULT), for UPDATE/DELETE FOR PORTION OF, and then for the\nrest of the FK support (which depends on FOR PORTION OF). If you\ncompare these patches to the v11 ones, you'll see that a ton of\nclutter disappears by not supporting PERIODs as a separate \"thing\".\n\nFinally there is a patch adding PERIOD syntax, but with a new\nimplementation where a PERIOD causes us to just define a GENERATED\nrange column. That means we can support all the same things as before\nbut without adding the clutter. This patch isn't quite working yet\n(especially ALTER TABLE), but I thought I'd send where I'm at so far,\nsince it sounds like folks are interested in doing a review. Also it\nwas a little tricky dealing with the dependency between the PERIOD and\nthe GENERATED column. (See the comments in the patch.) If anyone has a\nsuggestion there I'd be happy to hear it.\n\nMy goal is to include another patch soon to support hidden columns, so\nthat the period's GENERATED column can be hidden. 
I read the\nconversation about a recent patch attempt for something similar, and I\nthink I can use most of that (but cut some of the things the community\nwas worried about).\n\nAll these patches need some polishing, but I think there is enough new\nhere for them to be worth reading for anyone interested in temporal\nprogress.\n\nI'll set this commitfest entry back to Needs Review. Thanks for taking a look!\n\nPaul", "msg_date": "Thu, 6 Jul 2023 18:03:37 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 07.07.23 03:03, Paul A Jungwirth wrote:\n> Here are some new patch files based on discussions from PGCon.\n\nHere are a few fixup patches to get things building without warnings and \nerrors.\n\nThe last patch (your 0005) fails the regression test for me and it \ndidn't appear to be a trivial problem, so please take another look at \nthat sometime. (Since it's the last patch, it's obviously lower priority.)", "msg_date": "Wed, 12 Jul 2023 10:24:45 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Jul 7, 2023 at 9:04 AM Paul A Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On Thu, Jul 6, 2023 at 1:13 AM Peter Eisentraut\n> <peter.eisentraut@enterprisedb.com> wrote:\n> >\n> > I had talked to Paul about this offline a while ago. btree_gist to core\n> > is no longer considered a prerequisite. But Paul was planning to\n> > produce a new patch set that is arranged and sequenced a bit\n> > differently. Apparently, that new version is not done yet, so it would\n> > make sense to either close this entry as returned with feedback, or move\n> > it to the next commit fest as waiting on author.\n>\n> Here are some new patch files based on discussions from PGCon. 
The\n> patches are reorganized a bit to hopefully make them easier to review:\n>\n> Initially I implement all functionality on just range columns, without\n> supporting PERIODs yet. There are patches for temporal PRIMARY\n> KEY/UNIQUE constraints, for simple foreign keys (without CASCADE/SET\n> NULL/SET DEFAULT), for UPDATE/DELETE FOR PORTION OF, and then for the\n> rest of the FK support (which depends on FOR PORTION OF). If you\n> compare these patches to the v11 ones, you'll see that a ton of\n> clutter disappears by not supporting PERIODs as a separate \"thing\".\n>\n> Finally there is a patch adding PERIOD syntax, but with a new\n> implementation where a PERIOD causes us to just define a GENERATED\n> range column. That means we can support all the same things as before\n> but without adding the clutter. This patch isn't quite working yet\n> (especially ALTER TABLE), but I thought I'd send where I'm at so far,\n> since it sounds like folks are interested in doing a review. Also it\n> was a little tricky dealing with the dependency between the PERIOD and\n> the GENERATED column. (See the comments in the patch.) If anyone has a\n> suggestion there I'd be happy to hear it.\n>\n> My goal is to include another patch soon to support hidden columns, so\n> that the period's GENERATED column can be hidden. I read the\n> conversation about a recent patch attempt for something similar, and I\n> think I can use most of that (but cut some of the things the community\n> was worried about).\n>\n> All these patches need some polishing, but I think there is enough new\n> here for them to be worth reading for anyone interested in temporal\n> progress.\n>\n> I'll set this commitfest entry back to Needs Review. 
Thanks for taking a look!\n>\n> Paul\n\ndue to change in:\nhttps://www.postgresql.org/message-id/flat/ec8b1d9b-502e-d1f8-e909-1bf9dffe6fa5@illuminatedcomputing.com\n\ngit apply $DOWNLOADS/patches/v12-0001-Add-temporal-PRIMARY-KEY-and-UNIQUE-constraints.patch\nerror: patch failed: src/backend/commands/indexcmds.c:940\nerror: src/backend/commands/indexcmds.c: patch does not apply\n\nprobably need some adjustment.\n\n\n", "msg_date": "Sat, 15 Jul 2023 17:04:07 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 7/12/23 01:24, Peter Eisentraut wrote:\n> On 07.07.23 03:03, Paul A Jungwirth wrote:\n>> Here are some new patch files based on discussions from PGCon.\n> \n> Here are a few fixup patches to get things building without warnings and \n> errors.\n> \n> The last patch (your 0005) fails the regression test for me and it \n> didn't appear to be a trivial problem, so please take another look at \n> that sometime.  (Since it's the last patch, it's obviously lower priority.)\n\nHello,\n\nHere are the latest patches for my temporal tables work. They are \nrebased on e8d74ad6 from Aug 31.\n\nI incorporated Peter's edits mentioned above and have made various other \nimprovements.\n\nThe most significant change is support for partitioned tables. Those now \nwork with temporal PRIMARY KEY and UNIQUE constraints, FOR PORTION OF \ncommands, and FOREIGN KEYs.\n\nI've tried to clean up the first four patches to get them ready for \ncommitting, since they could get committed before the PERIOD patch. I \nthink there is a little more cleanup needed but they should be ready for \na review.\n\nThe PERIOD patch is not finished and includes some deliberately-failing \ntests. I did make some progress here finishing ALTER TABLE ADD PERIOD.\n\nI could use help handling the INTERNAL depenency from the PERIOD to its \n(hidden) GENERATED column. 
The problem is in findDependentObjects: if \nyou drop the PERIOD, then Postgres automatically tries to drop the \ncolumn (correctly), but then it seems to think it needs to drop the \nwhole table. I think this is because a column's object address is the \ntable's object address plus a subaddress equaling the attno, and \nfindDependentObjects thinks it should drop the whole thing. I'm sure I \ncan sort this out, but if anyone has a suggestion it might save me some \ntime.\n\nThanks for taking a look!\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Thu, 31 Aug 2023 14:26:31 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": ">\n> The PERIOD patch is not finished and includes some deliberately-failing\n> tests. I did make some progress here finishing ALTER TABLE ADD PERIOD.\n>\n\nIf it's ok with you, I need PERIODs for System Versioning, and planned on\ndeveloping a highly similar version, albeit closer to the standard. It\nshouldn't interfere with your work as you're heavily leveraging range\ntypes.\n\nThe PERIOD patch is not finished and includes some deliberately-failing \ntests. I did make some progress here finishing ALTER TABLE ADD PERIOD.If it's ok with you, I need PERIODs for System Versioning, and planned on developing a highly similar version, albeit closer to the standard. It shouldn't interfere with your work as you're heavily leveraging range types.", "msg_date": "Fri, 1 Sep 2023 00:02:09 -0400", "msg_from": "Corey Huinker <corey.huinker@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 31.08.23 23:26, Paul Jungwirth wrote:\n> I've tried to clean up the first four patches to get them ready for \n> committing, since they could get committed before the PERIOD patch. 
I \n> think there is a little more cleanup needed but they should be ready for \n> a review.\n\nLooking at the patch 0001 \"Add temporal PRIMARY KEY and UNIQUE constraints\":\n\nGenerally, this looks like a good direction. The patch looks \ncomprehensive, with documentation and tests, and appears to cover all \nthe required pieces (client programs, ruleutils, etc.).\n\n\nI have two conceptual questions that should be clarified before we go \nmuch further:\n\n1) If I write UNIQUE (a, b, c WITHOUT OVERLAPS), does the WITHOUT \nOVERLAPS clause attach to the last column, or to the whole column list? \nIn the SQL standard, you can only have one period and it has to be \nlisted last, so this question does not arise. But here we are building \na more general facility to then build the SQL facility on top of. So I \nthink it doesn't make sense that the range column must be last or that \nthere can only be one. Also, your implementation requires at least one \nnon-overlaps column, which also seems like a confusing restriction.\n\nI think the WITHOUT OVERLAPS clause should be per-column, so that \nsomething like UNIQUE (a WITHOUT OVERLAPS, b, c WITHOUT OVERLAPS) would \nbe possible. Then the WITHOUT OVERLAPS clause would directly correspond \nto the choice between equality or overlaps operator per column.\n\nAn alternative interpretation would be that WITHOUT OVERLAPS applies to \nthe whole column list, and we would take it to mean, for any range \ncolumn, use the overlaps operator, for any non-range column, use the \nequals operator. But I think this would be confusing and would prevent \nthe case of using the equality operator for some ranges and the overlaps \noperator for some other ranges in the same key.\n\n2) The logic hinges on get_index_attr_temporal_operator(), to pick the \nequality and overlaps operator for each column. For btree indexes, the \nstrategy numbers are fixed, so this is straightforward. 
But for gist \nindexes, the strategy numbers are more like recommendations. Are we \ncomfortable with how this works? I mean, we could say, if you want to \nbe able to take advantage of the WITHOUT OVERLAPS syntax, you have to \nuse these numbers, otherwise you're on your own. It looks like the gist \nstrategy numbers are already hardcoded in a number of places, so maybe \nthat's all okay, but I feel we should be more explicit about this \nsomewhere, maybe in the documentation, or at least in code comments.\n\n\nBesides that, some stylistic comments:\n\n* There is a lot of talk about \"temporal\" in this patch, but this \nfunctionality is more general than temporal. I would prefer to change \nthis to more neutral terms like \"overlaps\".\n\n* The field ii_Temporal in IndexInfo doesn't seem necessary and could be \nhandled via local variables. See [0] for a similar discussion:\n\n[0]: \nhttps://www.postgresql.org/message-id/flat/f84640e3-00d3-5abd-3f41-e6a19d33c40b@eisentraut.org\n\n* In gram.y, change withoutOverlapsClause -> without_overlaps_clause for \nconsistency with the surrounding code.\n\n* No-op assignments like n->without_overlaps = NULL; can be omitted. \n(Or you should put them everywhere. But only in some places seems \ninconsistent and confusing.)\n\n\n", "msg_date": "Fri, 1 Sep 2023 11:30:57 +0200", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 9/1/23 11:30, Peter Eisentraut wrote:\n> 1) If I write UNIQUE (a, b, c WITHOUT OVERLAPS), does the WITHOUT \n> OVERLAPS clause attach to the last column, or to the whole column list? \n> In the SQL standard, you can only have one period and it has to be \n> listed last, so this question does not arise.  But here we are building \n> a more general facility to then build the SQL facility on top of.  So I \n> think it doesn't make sense that the range column must be last or that \n> there can only be one.  
Also, your implementation requires at least one \n> non-overlaps column, which also seems like a confusing restriction.\n> \n> I think the WITHOUT OVERLAPS clause should be per-column, so that \n> something like UNIQUE (a WITHOUT OVERLAPS, b, c WITHOUT OVERLAPS) would \n> be possible.  Then the WITHOUT OVERLAPS clause would directly correspond \n> to the choice between equality or overlaps operator per column.\n> \n> An alternative interpretation would be that WITHOUT OVERLAPS applies to \n> the whole column list, and we would take it to mean, for any range \n> column, use the overlaps operator, for any non-range column, use the \n> equals operator.  But I think this would be confusing and would prevent \n> the case of using the equality operator for some ranges and the overlaps \n> operator for some other ranges in the same key.\n\nI prefer the first option. That is: WITHOUT OVERLAPS applies only to \nthe column or expression it is attached to, and need not be last in line.\n-- \nVik Fearing\n\n\n\n", "msg_date": "Fri, 1 Sep 2023 12:50:08 +0200", "msg_from": "Vik Fearing <vik@postgresfriends.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 9/1/23 03:50, Vik Fearing wrote:\n> On 9/1/23 11:30, Peter Eisentraut wrote:\n>> 1) If I write UNIQUE (a, b, c WITHOUT OVERLAPS), does the WITHOUT \n>> OVERLAPS clause attach to the last column, or to the whole column \n>> list? In the SQL standard, you can only have one period and it has to \n>> be listed last, so this question does not arise.  But here we are \n>> building a more general facility to then build the SQL facility on top \n>> of.  So I think it doesn't make sense that the range column must be \n>> last or that there can only be one.  
Also, your implementation \n>> requires at least one non-overlaps column, which also seems like a \n>> confusing restriction.\n>>\n>> I think the WITHOUT OVERLAPS clause should be per-column, so that \n>> something like UNIQUE (a WITHOUT OVERLAPS, b, c WITHOUT OVERLAPS) \n>> would be possible.  Then the WITHOUT OVERLAPS clause would directly \n>> correspond to the choice between equality or overlaps operator per \n>> column.\n>>\n>> An alternative interpretation would be that WITHOUT OVERLAPS applies \n>> to the whole column list, and we would take it to mean, for any range \n>> column, use the overlaps operator, for any non-range column, use the \n>> equals operator.  But I think this would be confusing and would \n>> prevent the case of using the equality operator for some ranges and \n>> the overlaps operator for some other ranges in the same key.\n> \n> I prefer the first option.  That is: WITHOUT OVERLAPS applies only to \n> the column or expression it is attached to, and need not be last in line.\n\nI agree. The second option seems confusing and is more restrictive.\n\nI think allowing multiple uses of `WITHOUT OVERLAPS` (and in any \nposition) is a great recommendation that enables a lot of new \nfunctionality. Several books[1,2] about temporal databases describe a \nmulti-dimensional temporal space (even beyond application time vs. \nsystem time), and the standard is pretty disappointing here. It's not a \nweird idea.\n\nBut I just want to be explicit that this isn't something the standard \ndescribes. (I think everyone in the conversation so far understands \nthat.) So far I've tried to be pretty scrupulous about following \nSQL:2011, although personally I'd rather see Postgres support this \nfunctionality. And it's not like it goes *against* what the standard \nsays. But if there are any objections, I'd love to hear them before \nputting in the work. 
:-)\n\nIf we allow multiple+anywhere WITHOUT OVERLAPS in PRIMARY KEY & UNIQUE \nconstraints, then surely we also allow multiple+anywhere PERIOD in \nFOREIGN KEY constraints too. (I guess the standard switched keywords \nbecause a FK is more like \"MUST OVERLAPS\". :-)\n\nAlso if you have multiple application-time dimensions we probably need \nto allow multiple FOR PORTION OF clauses. I think the syntax would be:\n\nUPDATE t\n FOR PORTION OF valid_at FROM ... TO ...\n FOR PORTION OF asserted_at FROM ... TO ...\n [...]\n SET foo = bar\n\nDoes that sound okay?\n\nI don't quite understand this part:\n\n >> Also, your implementation\n >> requires at least one non-overlaps column, which also seems like a\n >> confusing restriction.\n\nThat's just a regular non-temporal constraint. Right? If I'm missing \nsomething let me know.\n\n[1] C. J. Date, Hugh Darwen, Nikos Lorentzos. Time and Relational \nTheory, Second Edition: Temporal Databases in the Relational Model and \nSQL. 2nd edition, 2014.\n[2] Tom Johnston. Bitemporal Data: Theory and Practice. 2014.\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Fri, 1 Sep 2023 12:56:40 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 9/1/23 21:56, Paul Jungwirth wrote:\n> On 9/1/23 03:50, Vik Fearing wrote:\n>> On 9/1/23 11:30, Peter Eisentraut wrote:\n>>> 1) If I write UNIQUE (a, b, c WITHOUT OVERLAPS), does the WITHOUT \n>>> OVERLAPS clause attach to the last column, or to the whole column \n>>> list? In the SQL standard, you can only have one period and it has to \n>>> be listed last, so this question does not arise.  But here we are \n>>> building a more general facility to then build the SQL facility on \n>>> top of.  So I think it doesn't make sense that the range column must \n>>> be last or that there can only be one.  
Also, your implementation \n>>> requires at least one non-overlaps column, which also seems like a \n>>> confusing restriction.\n>>>\n>>> I think the WITHOUT OVERLAPS clause should be per-column, so that \n>>> something like UNIQUE (a WITHOUT OVERLAPS, b, c WITHOUT OVERLAPS) \n>>> would be possible.  Then the WITHOUT OVERLAPS clause would directly \n>>> correspond to the choice between equality or overlaps operator per \n>>> column.\n>>>\n>>> An alternative interpretation would be that WITHOUT OVERLAPS applies \n>>> to the whole column list, and we would take it to mean, for any range \n>>> column, use the overlaps operator, for any non-range column, use the \n>>> equals operator.  But I think this would be confusing and would \n>>> prevent the case of using the equality operator for some ranges and \n>>> the overlaps operator for some other ranges in the same key.\n>>\n>> I prefer the first option.  That is: WITHOUT OVERLAPS applies only to \n>> the column or expression it is attached to, and need not be last in line.\n> \n> I agree. The second option seems confusing and is more restrictive.\n> \n> I think allowing multiple uses of `WITHOUT OVERLAPS` (and in any \n> position) is a great recommendation that enables a lot of new \n> functionality. Several books[1,2] about temporal databases describe a \n> multi-dimensional temporal space (even beyond application time vs. \n> system time), and the standard is pretty disappointing here. It's not a \n> weird idea.\n> \n> But I just want to be explicit that this isn't something the standard \n> describes. (I think everyone in the conversation so far understands \n> that.) So far I've tried to be pretty scrupulous about following \n> SQL:2011, although personally I'd rather see Postgres support this \n> functionality. And it's not like it goes *against* what the standard \n> says. But if there are any objections, I'd love to hear them before \n> putting in the work. 
:-)\n\n\nI have no problem with a first version doing exactly what the standard \nsays and expanding it later.\n\n\n> If we allow multiple+anywhere WITHOUT OVERLAPS in PRIMARY KEY & UNIQUE \n> constraints, then surely we also allow multiple+anywhere PERIOD in \n> FOREIGN KEY constraints too. (I guess the standard switched keywords \n> because a FK is more like \"MUST OVERLAPS\". :-)\n\n\nSeems reasonable.\n\n\n> Also if you have multiple application-time dimensions we probably need \n> to allow multiple FOR PORTION OF clauses. I think the syntax would be:\n> \n> UPDATE t\n>   FOR PORTION OF valid_at FROM ... TO ...\n>   FOR PORTION OF asserted_at FROM ... TO ...\n>   [...]\n>   SET foo = bar\n> \n> Does that sound okay?\n\n\nThat sounds really cool.\n\n\n> [1] C. J. Date, Hugh Darwen, Nikos Lorentzos. Time and Relational \n> Theory, Second Edition: Temporal Databases in the Relational Model and \n> SQL. 2nd edition, 2014.\n> [2] Tom Johnston. Bitemporal Data: Theory and Practice. 2014.\n\n\nThanks! I have ordered these books.\n-- \nVik Fearing\n\n\n\n", "msg_date": "Sat, 2 Sep 2023 00:41:13 +0200", "msg_from": "Vik Fearing <vik@postgresfriends.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sat, Sep 2, 2023 at 5:58 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n>\n> I don't quite understand this part:\n>\n> >> Also, your implementation\n> >> requires at least one non-overlaps column, which also seems like a\n> >> confusing restriction.\n>\n> That's just a regular non-temporal constraint. Right? If I'm missing\n> something let me know.\n>\n\nfor a range primary key, is it fine to expect it to be unique, not\nnull and also not overlap? (i am not sure how hard to implement it).\n\n-----------------------------------------------------------------\nquote from 7IWD2-02-Foundation-2011-12.pdf. 
4.18.3.2 Unique\nconstraints, page 97 of 1483.\n\n4.18.3.2 Unique constraints In addition to the components of every\ntable constraint descriptor, a unique constraint descriptor includes:\n— An indication of whether it was defined with PRIMARY KEY or UNIQUE.\n— The names and positions of the unique columns specified in the\n<unique column list>\n — If <without overlap specification> is specified, then the name of\nthe period specified.\n\nIf the table descriptor for base table T includes a unique constraint\ndescriptor indicating that the unique constraint was defined with\nPRIMARY KEY, then the columns of that unique constraint constitute the\nprimary key of T. A table that has a primary key cannot have a proper\nsupertable.\nA unique constraint that does not include a <without overlap\nspecification> on a table T is satisfied if and only if there do not\nexist two rows R1 and R2 of T such that R1 and R2 have the same\nnon-null values in the unique columns. If a unique constraint UC on a\ntable T includes a <without overlap specification> WOS, then let\n<application time period name> ATPN be the contained in WOS. UC is\nsatisfied if and only if there do not exist two rows R1 and R2 of T\nsuch that R1 and R2 have the same non-null values in the unique\ncolumns and the ATPN period values of R1 and R2 overlap. In addition,\nif the unique constraint was defined with PRIMARY KEY, then it\nrequires that none of the values in the specified column or columns be\na null value.\n-----------------------------------------------------------------\nbased on the above, the unique constraint does not specify that the\ncolumn list must be range type. 
UNIQUE (a, c WITHOUT OVERLAPS).\nHere column \"a\" can be a range type (that have overlap property) and\ncan be not.\nIn fact, many of your primary key, foreign key regess test using\nsomething like '[11,11]' (which make it more easy to understand),\nwhich in logic is a non-range usage.\nSo UNIQUE (a, c WITHOUT OVERLAPS), column \"a\" be a non-range data type\ndoes make sense?\n\n\n", "msg_date": "Fri, 8 Sep 2023 09:24:22 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "hi.\nthe following script makes the server crash (Segmentation fault).\n\ncreate schema test;\nset search_path to test;\nDROP TABLE IF EXISTS temporal_rng;\nCREATE TABLE temporal_rng (id int4range, valid_at daterange);\nALTER TABLE temporal_rng\nADD CONSTRAINT temporal_rng_pk\nPRIMARY KEY (id, valid_at WITHOUT OVERLAPS);\n\nINSERT INTO temporal_rng VALUES\n ('[11,11]', daterange('2018-01-01', '2020-01-01')),\n ('[11,11]', daterange('2020-01-01', '2021-01-01')),\n ('[20,20]', daterange('2018-01-01', '2020-01-01')),\n ('[20,20]', daterange('2020-01-01', '2021-01-01'));\n\nDROP TABLE IF EXISTS temporal_fk_rng2rng;\n\nCREATE TABLE temporal_fk_rng2rng (\nid int4range,\nvalid_at tsrange,\nparent_id int4range,\nCONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)\n);\n---------------------------------------------------------------\nBEGIN;\nALTER TABLE temporal_fk_rng2rng DROP CONSTRAINT IF EXISTS\ntemporal_fk_rng2rng_fk;\nALTER TABLE temporal_fk_rng2rng\n ALTER COLUMN parent_id SET DEFAULT '[-1,-1]',\n ALTER COLUMN valid_at SET DEFAULT tsrange('2018-01-01', '2019-11-11');\n\nALTER TABLE temporal_fk_rng2rng\nADD CONSTRAINT temporal_fk_rng2rng_fk\nFOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng\n on update set DEFAULT\n on delete set DEFAULT;\n---------------------------------------------------------gdb related\ninfo:---------------\n(gdb) 
continue\nContinuing.\n\nProgram received signal SIGSEGV, Segmentation fault.\nFindFKComparisonOperators (fkconstraint=0x556450100bd8,\ntab=0x55644ff8f570, i=1, fkattnum=0x7ffeb3286ba0,\nold_check_ok=0x7ffeb3286b11, old_pfeqop_item=0x7ffeb3286b28,\npktype=3912, fktype=3908, opclass=10078, is_temporal=true,\nfor_overlaps=true, pfeqopOut=0x7ffeb3286da4, ppeqopOut=0x7ffeb3286e24,\nffeqopOut=0x7ffeb3286ea4) at\n../../Desktop/pg_sources/main/postgres/src/backend/commands/tablecmds.c:11582\n11582 pkattr_name = strVal(fkconstraint->pk_period);\n(gdb) where\n#0 FindFKComparisonOperators (fkconstraint=0x556450100bd8,\ntab=0x55644ff8f570, i=1,\n fkattnum=0x7ffeb3286ba0, old_check_ok=0x7ffeb3286b11,\nold_pfeqop_item=0x7ffeb3286b28, pktype=3912,\n fktype=3908, opclass=10078, is_temporal=true, for_overlaps=true,\npfeqopOut=0x7ffeb3286da4,\n ppeqopOut=0x7ffeb3286e24, ffeqopOut=0x7ffeb3286ea4)\n at ../../Desktop/pg_sources/main/postgres/src/backend/commands/tablecmds.c:11582\n#1 0x000055644e53875a in ATAddForeignKeyConstraint\n(wqueue=0x7ffeb3287118, tab=0x55644ff8f570,\n rel=0x7fb2dc124430, fkconstraint=0x556450100bd8, recurse=true,\nrecursing=false, lockmode=6)\n at ../../Desktop/pg_sources/main/postgres/src/backend/commands/tablecmds.c:10395\n#2 0x000055644e536cc2 in ATExecAddConstraint (wqueue=0x7ffeb3287118,\ntab=0x55644ff8f570,\n rel=0x7fb2dc124430, newConstraint=0x556450100bd8, recurse=true,\nis_readd=false, lockmode=6)\n at ../../Desktop/pg_sources/main/postgres/src/backend/commands/tablecmds.c:9948\n#3 0x000055644e528eaa in ATExecCmd (wqueue=0x7ffeb3287118,\ntab=0x55644ff8f570, cmd=0x5564500fae48,\n lockmode=6, cur_pass=10, context=0x7ffeb3287310)\n at ../../Desktop/pg_sources/main/postgres/src/backend/commands/tablecmds.c:5711\n#4 0x000055644e5283f6 in ATRewriteCatalogs (wqueue=0x7ffeb3287118,\nlockmode=6, context=0x7ffeb3287310)\n at ../../Desktop/pg_sources/main/postgres/src/backend/commands/tablecmds.c:5569\n#5 0x000055644e527031 in ATController 
(parsetree=0x55645000e228,\nrel=0x7fb2dc124430,\n cmds=0x55645000e1d8, recurse=true, lockmode=6, context=0x7ffeb3287310)\n at ../../Desktop/pg_sources/main/postgres/src/backend/commands/tablecmds.c:5136\n#6 0x000055644e526a9d in AlterTable (stmt=0x55645000e228, lockmode=6,\ncontext=0x7ffeb3287310)\n at ../../Desktop/pg_sources/main/postgres/src/backend/commands/tablecmds.c:4789\n#7 0x000055644e92eb65 in ProcessUtilitySlow (pstate=0x55644ff8f460,\npstmt=0x55645000e2d8,\n--Type <RET> for more, q to quit, c to continue without paging--\n 55645000d330 \"ALTER TABLE temporal_fk_rng2rng\\n\\tADD CONSTRAINT\ntemporal_fk_rng2rng_fk\\n\\tFOREIGN KEY (parent_id, PERIOD\nvalid_at)\\n\\tREFERENCES temporal_rng\\n on update set DEFAULT \\n\n on delete set DEFAULT;\", context=PROCESS_UTILITY_TOPLEVEL,\nparams=0x0, queryEnv=0x0,\n dest=0x55645000e698, qc=0x7ffeb3287970)\n at ../../Desktop/pg_sources/main/postgres/src/backend/tcop/utility.c:1329\n#8 0x000055644e92e24c in standard_ProcessUtility (pstmt=0x55645000e2d8,\n queryString=0x55645000d330 \"ALTER TABLE temporal_fk_rng2rng\\n\\tADD\nCONSTRAINT temporal_fk_rng2rng_fk\\n\\tFOREIGN KEY (parent_id, PERIOD\nvalid_at)\\n\\tREFERENCES temporal_rng\\n on update set DEFAULT \\n\n on delete set DEFAULT;\", readOnlyTree=false,\ncontext=PROCESS_UTILITY_TOPLEVEL, params=0x0,\n queryEnv=0x0, dest=0x55645000e698, qc=0x7ffeb3287970)\n at ../../Desktop/pg_sources/main/postgres/src/backend/tcop/utility.c:1078\n#9 0x000055644e92c921 in ProcessUtility (pstmt=0x55645000e2d8,\n queryString=0x55645000d330 \"ALTER TABLE temporal_fk_rng2rng\\n\\tADD\nCONSTRAINT temporal_fk_rng2rng_fk\\n\\tFOREIGN KEY (parent_id, PERIOD\nvalid_at)\\n\\tREFERENCES temporal_rng\\n on update set DEFAULT \\n\n on delete set DEFAULT;\", readOnlyTree=false,\ncontext=PROCESS_UTILITY_TOPLEVEL, params=0x0,\n queryEnv=0x0, dest=0x55645000e698, qc=0x7ffeb3287970)\n at ../../Desktop/pg_sources/main/postgres/src/backend/tcop/utility.c:530\n#10 0x000055644e92a83e in 
PortalRunUtility (portal=0x5564500a9840,\npstmt=0x55645000e2d8,\n isTopLevel=true, setHoldSnapshot=false, dest=0x55645000e698,\nqc=0x7ffeb3287970)\n at ../../Desktop/pg_sources/main/postgres/src/backend/tcop/pquery.c:1158\n#11 0x000055644e92abdb in PortalRunMulti (portal=0x5564500a9840,\nisTopLevel=true,\n setHoldSnapshot=false, dest=0x55645000e698,\naltdest=0x55645000e698, qc=0x7ffeb3287970)\n at ../../Desktop/pg_sources/main/postgres/src/backend/tcop/pquery.c:1315\n#12 0x000055644e929b53 in PortalRun (portal=0x5564500a9840,\ncount=9223372036854775807, isTopLevel=true,\n run_once=true, dest=0x55645000e698, altdest=0x55645000e698,\nqc=0x7ffeb3287970)\n at ../../Desktop/pg_sources/main/postgres/src/backend/tcop/pquery.c:791\n#13 0x000055644e91f206 in exec_simple_query (\n query_string=0x55645000d330 \"ALTER TABLE\ntemporal_fk_rng2rng\\n\\tADD CONSTRAINT\ntemporal_fk_rng2rng_fk\\n\\tFOREIGN KEY (parent_id, PERIOD\nvalid_at)\\n\\tREFERENCES temporal_rng\\n on update set DEFAULT \\n\n on delete set DEFAULT;\")\n at ../../Desktop/pg_sources/main/postgres/src/backend/tcop/postgres.c:1274\n--Type <RET> for more, q to quit, c to continue without paging--\n#14 0x000055644e926c49 in PostgresMain (dbname=0x556450045610 \"regression\",\n username=0x5564500455f8 \"jian\")\n at ../../Desktop/pg_sources/main/postgres/src/backend/tcop/postgres.c:4637\n#15 0x000055644e7ff0e9 in BackendRun (port=0x5564500394f0)\n at ../../Desktop/pg_sources/main/postgres/src/backend/postmaster/postmaster.c:4438\n#16 0x000055644e7fe6a1 in BackendStartup (port=0x5564500394f0)\n at ../../Desktop/pg_sources/main/postgres/src/backend/postmaster/postmaster.c:4166\n#17 0x000055644e7f8aa0 in ServerLoop ()\n at ../../Desktop/pg_sources/main/postgres/src/backend/postmaster/postmaster.c:1780\n#18 0x000055644e7f8042 in PostmasterMain (argc=3, argv=0x55644ff77e60)\n at ../../Desktop/pg_sources/main/postgres/src/backend/postmaster/postmaster.c:1464\n#19 0x000055644e67f884 in main (argc=3, argv=0x55644ff77e60)\n 
at ../../Desktop/pg_sources/main/postgres/src/backend/main/main.c:198\n\n\n", "msg_date": "Fri, 8 Sep 2023 17:35:10 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Sep 8, 2023 at 2:35 AM jian he <jian.universality@gmail.com> wrote:\n>\n> hi.\n> the following script makes the server crash (Segmentation fault).\n> [snip]\n>\n> ALTER TABLE temporal_fk_rng2rng\n> ADD CONSTRAINT temporal_fk_rng2rng_fk\n> FOREIGN KEY (parent_id, PERIOD valid_at)\n> REFERENCES temporal_rng\n> on update set DEFAULT\n> on delete set DEFAULT;\n\nThank you for the report! It looks like I forgot to handle implicit\ncolumn names after REFERENCES. The PERIOD part needs to get looked up\nfrom the PK as we do for normal FK attrs. I'll add that to the next\npatch.\n\nYours,\nPaul\n\n\n", "msg_date": "Fri, 8 Sep 2023 10:26:59 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "hi\nI am confused by (pk,fk) on delete on update (restriction and no\naction) result based on v13.\nrelated post: https://stackoverflow.com/questions/14921668/difference-between-restrict-and-no-action\nPlease check the following test and comments.\n\n---common setup for test0, test1,test2,test3\nBEGIN;\nDROP TABLE IF EXISTS temporal_rng, temporal_fk_rng2rng;\nCREATE TABLE temporal_rng ( id int4range,valid_at tsrange);\nALTER TABLE temporal_rng\nADD CONSTRAINT temporal_rng_pk\nPRIMARY KEY (id, valid_at WITHOUT OVERLAPS);\nCREATE TABLE temporal_fk_rng2rng (\nid int4range,\nvalid_at tsrange,\nparent_id int4range\n);\ncommit;\n\n----------------no_action_vs_restriction test0\nBEGIN;\nALTER TABLE temporal_fk_rng2rng\nDROP CONSTRAINT IF EXISTS temporal_fk_rng2rng_fk;\nALTER TABLE temporal_fk_rng2rng\nADD CONSTRAINT temporal_fk_rng2rng_fk\nFOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng\n ON DELETE 
NO ACTION\nON UPDATE NO ACTION;\nINSERT INTO temporal_rng VALUES ('[5,5]', tsrange('2018-01-01', '2018-02-01')),\n ('[5,5]', tsrange('2018-02-01', '2018-03-01'));\nINSERT INTO temporal_fk_rng2rng VALUES ('[3,3]', tsrange('2018-01-05',\n'2018-01-10'), '[5,5]');\n\n/*\nexpect below to fail.\nsince to be deleted range is being referenced (in temporal_fk_rng2rng)\nbut the v13 patch won't fail.\n*/\ndelete from temporal_rng\n FOR PORTION OF valid_at FROM '2018-01-06' TO '2018-01-11'\nWHERE id = '[5,5]'\nAND valid_at @> '2018-01-05'::timestamp;\nTABLE temporal_rng \\; table temporal_fk_rng2rng;\nROLLBACK;\n\n\n----------------no_action_vs_restriction test1\nBEGIN;\nALTER TABLE temporal_fk_rng2rng\nDROP CONSTRAINT IF EXISTS temporal_fk_rng2rng_fk;\nALTER TABLE temporal_fk_rng2rng\nADD CONSTRAINT temporal_fk_rng2rng_fk\nFOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng\nON DELETE RESTRICT\nON UPDATE RESTRICT;\nINSERT INTO temporal_rng VALUES ('[5,5]', tsrange('2018-01-01', '2018-02-01')),\n ('[5,5]', tsrange('2018-02-01', '2018-03-01'));\nINSERT INTO temporal_fk_rng2rng VALUES ('[3,3]', tsrange('2018-01-05',\n'2018-01-10'), '[5,5]');\n\n/*\nexpect the below command not to fail.\nsince to be deleted range is not being referenced in temporal_fk_rng2rng)\nbut the v13 patch will fail.\n*/\ndelete from temporal_rng\n FOR PORTION OF valid_at FROM '2018-01-12' TO '2018-01-20'\nWHERE id = '[5,5]'\nAND valid_at @> '2018-01-05'::timestamp;\nROLLBACK;\n\n\n----------------no_action_vs_restriction test2\nBEGIN;\nALTER TABLE temporal_fk_rng2rng\nDROP CONSTRAINT IF EXISTS temporal_fk_rng2rng_fk;\nALTER TABLE temporal_fk_rng2rng\nADD CONSTRAINT temporal_fk_rng2rng_fk\nFOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng\nON DELETE no action\nON UPDATE no action;\nINSERT INTO temporal_rng VALUES ('[5,5]', tsrange('2018-01-01', '2018-02-01')),\n ('[5,5]', tsrange('2018-02-01', '2018-03-01'));\nINSERT INTO temporal_fk_rng2rng VALUES ('[3,3]', 
tsrange('2018-01-05',\n'2018-01-10'), '[5,5]');\n/*\nexpect below command fail.\nsince to be deleted range is being referenced (in temporal_fk_rng2rng)\n*/\nUPDATE temporal_rng FOR PORTION OF valid_at FROM '2018-01-06' TO '2018-01-08'\nSET id = '[7,7]'\nWHERE id = '[5,5]'\nAND valid_at @> '2018-01-05'::timestamp;\nTABLE temporal_rng \\; table temporal_fk_rng2rng;\n\nROLLBACK;\n\n\n----------------no_action_vs_restriction test3\nBEGIN;\nALTER TABLE temporal_fk_rng2rng\nDROP CONSTRAINT IF EXISTS temporal_fk_rng2rng_fk;\nALTER TABLE temporal_fk_rng2rng\nADD CONSTRAINT temporal_fk_rng2rng_fk\nFOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng\nON DELETE RESTRICT\nON UPDATE RESTRICT;\nINSERT INTO temporal_rng VALUES ('[5,5]', tsrange('2018-01-01', '2018-02-01')),\n ('[5,5]', tsrange('2018-02-01', '2018-03-01'));\nINSERT INTO temporal_fk_rng2rng VALUES ('[3,3]', tsrange('2018-01-05',\n'2018-01-10'), '[5,5]');\n\n/*\nexpect the below command not to fail.\nsince to be deleted range is not being referenced in temporal_fk_rng2rng)\nbut the v13 patch will fail.\n*/\nUPDATE temporal_rng FOR PORTION OF valid_at FROM '2018-01-12' TO '2018-01-20'\nSET id = '[7,7]'\nWHERE id = '[5,5]'\nAND valid_at @> '2018-01-05'::timestamp;\nROLLBACK;\n\n\n", "msg_date": "Sat, 9 Sep 2023 15:54:37 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "hi. some trivial issue:\n\nin src/backend/catalog/index.c\n/* * System attributes are never null, so no need to check. 
*/\nif (attnum <= 0)\n\nsince you already checked attnum == 0\nso here you can just attnum < 0?\n-------------------------------------------------\nERROR: column \"valid_at\" named in WITHOUT OVERLAPS is not a range type\n\nIMHO, \"named\" is unnecessary.\n-------------------------------------------------\ndoc/src/sgml/catalogs.sgml\npg_constraint adds another attribute (column): contemporal, seems no doc entry.\n\nalso the temporal in oxford definition is \"relating to time\", here we\ncan deal with range.\nSo maybe \"temporal\" is not that accurate?\n\n\n", "msg_date": "Tue, 12 Sep 2023 17:01:36 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 9/7/23 18:24, jian he wrote:\n> for a range primary key, is it fine to expect it to be unique, not\n> null and also not overlap? (i am not sure how hard to implement it).\n> \n> -----------------------------------------------------------------\n> quote from 7IWD2-02-Foundation-2011-12.pdf. 4.18.3.2 Unique\n> constraints, page 97 of 1483.\n> \n> ...\n> -----------------------------------------------------------------\n> based on the above, the unique constraint does not specify that the\n> column list must be range type. 
UNIQUE (a, c WITHOUT OVERLAPS).\n> Here column \"a\" can be a range type (that have overlap property) and\n> can be not.\n> In fact, many of your primary key, foreign key regess test using\n> something like '[11,11]' (which make it more easy to understand),\n> which in logic is a non-range usage.\n> So UNIQUE (a, c WITHOUT OVERLAPS), column \"a\" be a non-range data type\n> does make sense?\n\nI'm not sure I understand this question, but here are a few things that \nmight help clarify things:\n\nIn SQL:2011, a temporal primary key, unique constraint, or foreign key \nmay have one or more \"scalar\" parts (just like a regular key) followed \nby one \"PERIOD\" part, which is denoted with \"WITHOUT OVERLAPS\" (in \nPKs/UNIQUEs) or \"PERIOD\" (in FKs). Except for this last key part, \neverything is still compared for equality, just as in a traditional key. \nBut this last part is compared for overlaps. It's exactly the same as \n`EXCLUDE (id WITH =, valid_at WITH &&)`. The overlap part must come last \nand you can have only one (but you may have more than one scalar part if \nyou like).\n\nIn the patch, I have followed that pattern, except I also allow a \nregular range column anywhere I allow a PERIOD. In fact PERIODs are \nmostly implemented on top of range types. (Until recently PERIOD support \nwas in the first patch, not the last, and there was code all throughout \nfor handling both, e.g. within indexes, etc. But at pgcon Peter \nsuggested building everything on just range columns, and then having \nPERIODs create an \"internal\" GENERATED column, and that cleaned up the \ncode considerably.)\n\nOne possible source of confusion is that in the tests I'm using range \ncolumns *also* for the scalar key part. So valid_at is a tsrange, and \nint is an int4range. This is not normally how you'd use the feature, but \nyou need the btree_gist extension to mix int & tsrange (e.g.), and \nthat's not available in the regress tests. 
We are still comparing the \nint4range for regular equality and the tsrange for overlaps. If you \nsearch this thread there was some discussion about bringing btree_gist \ninto core, but it sounds like it doesn't need to happen. (It might be \nstill desirable independently. EXCLUDE constraints are also not really \nsomething you can use practically without it, and their tests use the \nsame trick of comparing ranges for plain equality.)\n\nThe piece of discussion you're replying to is about allowing *multiple* \nWITHOUT OVERLAPS modifiers on a PK/UNIQUE constraint, and in any \nposition. I think that's a good idea, so I've started adapting the code \nto support it. (In fact there is a lot of code that assumes the overlaps \nkey part will be in the last position, and I've never really been happy \nwith that, so it's an excuse to make that more robust.) Here I'm saying \n(1) you will still need at least one scalar key part, (2) if there are \nno WITHOUT OVERLAPS parts then you just have a regular key, not a \ntemporal one, (3) changing this obliges us to do the same for foreign \nkeys and FOR PORTION OF.\n\nI hope that helps! I apologize if I've completely missed the point. If \nso please try again. :-)\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Thu, 14 Sep 2023 09:09:19 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Thanks for the thorough review and testing!\n\nHere is a v14 patch with the segfault and incorrect handling of NO \nACTION and RESTRICT fixed (and reproductions added to the test suite).\n\nA few more comments below on feedback from you and Peter:\n\nOn 9/12/23 02:01, jian he wrote:\n> hi. some trivial issue:\n> \n> in src/backend/catalog/index.c\n> /* * System attributes are never null, so no need to check. 
*/\n> if (attnum <= 0)\n> \n> since you already checked attnum == 0\n> so here you can just attnum < 0?\n\nI fixed the \"/* *\" typo here. I'm reluctant to change the attnum \ncomparison since that's not a line I touched. (It was just part of the \ncontext around the updated comment.) Your suggestion does make sense \nthough, so perhaps it should be a separate commit?\n\n> ERROR: column \"valid_at\" named in WITHOUT OVERLAPS is not a range type\n> \n> IMHO, \"named\" is unnecessary.\n\nChanged.\n\n> doc/src/sgml/catalogs.sgml\n> pg_constraint adds another attribute (column): contemporal, seems no doc entry.\n\nAdded.\n\n> also the temporal in oxford definition is \"relating to time\", here we\n> can deal with range.\n> So maybe \"temporal\" is not that accurate?\n\nI agree if we allow multiple WITHOUT OVERLAPS/etc clauses, we should \nchange the terminology. I'll include that with the multiple-range-keys \nchange discussed upthread.\n\nOn 9/1/23 02:30, Peter Eisentraut wrote:\n > * There is a lot of talk about \"temporal\" in this patch, but this\n > functionality is more general than temporal. I would prefer to change\n > this to more neutral terms like \"overlaps\".\n\nOkay, sounds like several of us agree on this.\n\n > * The field ii_Temporal in IndexInfo doesn't seem necessary and could\n > be handled via local variables. See [0] for a similar discussion:\n >\n > [0]:\n > \nhttps://www.postgresql.org/message-id/flat/f84640e3-00d3-5abd-3f41-e6a19d33c40b@eisentraut.org\n\nDone.\n\n > * In gram.y, change withoutOverlapsClause -> without_overlaps_clause\n > for consistency with the surrounding code.\n\nDone.\n\n > * No-op assignments like n->without_overlaps = NULL; can be omitted.\n > (Or you should put them everywhere. But only in some places seems\n > inconsistent and confusing.)\n\nChanged. That makes sense since newNode uses palloc0fast. 
FWIW there is \nquite a lot of other code in gram.y that sets NULL fields though, \nincluding in ConstraintElem, and it seems like it does improve the \nclarity a little. By \"everywhere\" I think you mean wherever the file \ncalls makeNode(Constraint)? I might go back and do it that way later.\n\nI'll keep working on a patch to support multiple range keys, but I \nwanted to work through the rest of the feedback first. Also there is \nsome fixing to do with partitions I believe, and then I'll finish the \nPERIOD support. So this v14 patch is just some minor fixes & tweaks from \nSeptember feedback.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Thu, 14 Sep 2023 09:11:02 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Sep 15, 2023 at 12:11 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n>\n> I'll keep working on a patch to support multiple range keys, but I\n> wanted to work through the rest of the feedback first. Also there is\n> some fixing to do with partitions I believe, and then I'll finish the\n> PERIOD support. So this v14 patch is just some minor fixes & tweaks from\n> September feedback.\n>\n\nsmall issues so far I found, v14.\n\nIndexInfo struct definition comment still has Temporal related\ncomment, should be removed.\n\ncatalog-pg-index.html, no indperiod doc entry, also in table pg_index,\ncolumn indperiod is junk value now.\nI think in UpdateIndexRelation, you need an add indperiod to build a\npg_index tuple, similar to what you did in CreateConstraintEntry.\n\nseems to make the following query works, we need to bring btree_gist\nrelated code to core?\nCREATE TABLE temporal_fk_rng2rng22 (id int8, valid_at int4range,\nunique (id, valid_at WITHOUT OVERLAPS));\n\n\n/* ----------------\n * pg_period definition. 
cpp turns this into\n * typedef struct FormData_pg_period\n * ----------------\n */\nCATALOG(pg_period,8000,PeriodRelationId)\n{\nOid oid; /* OID of the period */\nNameData pername; /* name of period */\nOid perrelid; /* OID of relation containing this period */\nint16 perstart; /* column for start value */\nint16 perend; /* column for end value */\nint16 perrange; /* column for range value */\nOid perconstraint; /* OID of (start < end) constraint */\n} FormData_pg_period;\n\nno idea what the above comment \"cpp'' refers to. The sixth field in\nFormData_pg_period: perrange, the comment conflict with catalogs.sgml\n>> perrngtype oid (references pg_type.oid)\n>> The OID of the range type associated with this period\n\n\ncreate table pt (id integer, ds date, de date, period for p (ds, de));\nSELECT table_name, column_name, column_default, is_nullable,\nis_generated, generation_expression\nFROM information_schema.columns\nWHERE table_name = 'pt' ORDER BY 1, 2;\n\nthe hidden generated column (p) is_nullable return NO. but ds, de\nis_nullable both return YES. 
so column p is_nullable should return\nYES?\n\n\n", "msg_date": "Mon, 18 Sep 2023 11:11:02 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Sep 15, 2023 at 12:11 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> Thanks for the thorough review and testing!\n>\n> Here is a v14 patch with the segfault and incorrect handling of NO\n> ACTION and RESTRICT fixed (and reproductions added to the test suite).\n>\n\nanother case:\nBEGIN;\nDROP TABLE IF EXISTS temporal_rng, temporal_fk_rng2rng;\nCREATE TABLE temporal_rng ( id int4range,valid_at tsrange);\nALTER TABLE temporal_rng\nADD CONSTRAINT temporal_rng_pk\nPRIMARY KEY (id, valid_at WITHOUT OVERLAPS);\nCREATE TABLE temporal_fk_rng2rng (\nid int4range,\nvalid_at tsrange,\nparent_id int4range\n);\nINSERT INTO temporal_rng VALUES ('[5,5]', tsrange('2018-01-01', '2018-02-01')),\n ('[5,5]', tsrange('2018-02-01', '2018-03-01'));\nINSERT INTO temporal_fk_rng2rng\nVALUES ('[3,3]', tsrange('2018-01-05','2018-01-10'), '[5,5]');\ncommit;\n\n\nBEGIN;\nALTER TABLE temporal_fk_rng2rng\nDROP CONSTRAINT IF EXISTS temporal_fk_rng2rng_fk;\nALTER TABLE temporal_fk_rng2rng\nADD CONSTRAINT temporal_fk_rng2rng_fk\nFOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng\n ON DELETE NO ACTION\nON UPDATE NO ACTION;\nALTER TABLE temporal_fk_rng2rng ALTER CONSTRAINT\ntemporal_fk_rng2rng_fk DEFERRABLE INITIALLY DEFERRED;\n\ndelete from temporal_rng; ---should not fail.\ncommit; ---fail in here.\n\n-------------------------------\nseems in ATExecAlterConstrRecurse change to\n\n/*\n* Update deferrability of RI_FKey_noaction_del,\n* RI_FKey_noaction_upd, RI_FKey_check_ins and RI_FKey_check_upd\n* triggers, but not others; see createForeignKeyActionTriggers\n* and CreateFKCheckTrigger.\n*/\nif (tgform->tgfoid != F_RI_FKEY_NOACTION_DEL &&\ntgform->tgfoid != F_TRI_FKEY_NOACTION_DEL &&\ntgform->tgfoid != 
F_RI_FKEY_NOACTION_UPD &&\ntgform->tgfoid != F_TRI_FKEY_NOACTION_UPD &&\ntgform->tgfoid != F_RI_FKEY_CHECK_INS &&\ntgform->tgfoid != F_TRI_FKEY_CHECK_INS &&\ntgform->tgfoid != F_RI_FKEY_CHECK_UPD &&\ntgform->tgfoid != F_TRI_FKEY_CHECK_UPD)\ncontinue;\n\nwill work.\n\n\n", "msg_date": "Mon, 18 Sep 2023 20:49:14 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 9/17/23 20:11, jian he wrote:\n> small issues so far I found, v14.\n\nThank you again for the review! v15 is attached.\n\n> IndexInfo struct definition comment still has Temporal related\n> comment, should be removed.\n\nFixed.\n\n> catalog-pg-index.html, no indperiod doc entry, also in table pg_index,\n> column indperiod is junk value now.\n\nYou're right, it is just unneeded now that PERIODs are implemented by \nGENERATED columns. I've removed it.\n\n> I think in UpdateIndexRelation, you need an add indperiod to build a\n> pg_index tuple, similar to what you did in CreateConstraintEntry.\n\nIt's gone now.\n\n> seems to make the following query works, we need to bring btree_gist\n> related code to core?\n> CREATE TABLE temporal_fk_rng2rng22 (id int8, valid_at int4range, > unique (id, valid_at WITHOUT OVERLAPS));\n\nIt doesn't need to be brought into core, but you would need to say \n`CREATE EXTENSION btree_gist` first. Since the regression tests don't \nassume we've built contrib, we have to use a workaround there.\n\n> /* ----------------\n> * pg_period definition. 
cpp turns this into\n> * typedef struct FormData_pg_period\n> * ----------------\n> */\n> CATALOG(pg_period,8000,PeriodRelationId)\n> {\n> Oid oid; /* OID of the period */\n> NameData pername; /* name of period */\n> Oid perrelid; /* OID of relation containing this period */\n> int16 perstart; /* column for start value */\n> int16 perend; /* column for end value */\n> int16 perrange; /* column for range value */\n> Oid perconstraint; /* OID of (start < end) constraint */\n> } FormData_pg_period;\n> \n> no idea what the above comment \"cpp'' refers to.\n\nI believe cpp = C Pre-Processor. This comment is at the top of all the \ncatalog/pg_*.h files. The next line is part of the same sentence (which \ntook me a while to notice :-).\n\n> The sixth field in\n> FormData_pg_period: perrange, the comment conflict with catalogs.sgml\n>>> perrngtype oid (references pg_type.oid)\n>>> The OID of the range type associated with this period\n\nYou're right, fixed! More cruft from the old PERIOD implementation.\n\n> create table pt (id integer, ds date, de date, period for p (ds, de));\n> SELECT table_name, column_name, column_default, is_nullable,\n> is_generated, generation_expression\n> FROM information_schema.columns\n> WHERE table_name = 'pt' ORDER BY 1, 2;\n> \n> the hidden generated column (p) is_nullable return NO. but ds, de\n> is_nullable both return YES. so column p is_nullable should return\n> YES?\n\nThe is_nullable behavior is correct I believe. In a range if the \nlower/upper value is NULL, it signifies the range has no lower/upper \nbound. So it's fine for ds or de to be NULL, but not the range itself (p).\n\nTechnically the SQL spec says that the PERIOD start & end columns should \nbe NOT NULL, but that forces people to use ugly sentinel values like \n'3999-01-01'. It's a shame to make people do that when NULL works so \nwell instead. Our time-related types do have Infinity and -Infinity \nwhich is not as ugly, but many other types do not. 
Plus those values \ninteract badly with ranges. For example `select '(,)'::daterange - \n'(,Infinity)'::daterange` gives the infinitesimal result `[infinity,)`. \nI've heard at least one report of that make a mess in a user's database. \nIf a user wants to make the start/end columns NOT NULL they can, so I \nprefer not to force them.\n\nContinuing to your other email:\n\nOn 9/18/23 05:49, jian he wrote:\n > BEGIN;\n > ...\n > ALTER TABLE temporal_fk_rng2rng ALTER CONSTRAINT\n > temporal_fk_rng2rng_fk DEFERRABLE INITIALLY DEFERRED;\n >\n > delete from temporal_rng; ---should not fail.\n > commit; ---fail in here.\n\nGreat catch! This is fixed also.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Tue, 19 Sep 2023 19:50:10 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, Sep 20, 2023 at 10:50 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 9/17/23 20:11, jian he wrote:\n> > small issues so far I found, v14.\n>\n> Thank you again for the review! v15 is attached.\n>\n\nhi. some tiny issues.\nIN src/backend/utils/adt/ri_triggers.c\n\nelse {\nappendStringInfo(&querybuf, \"SELECT 1 FROM %s%s x\",\npk_only, pkrelname);\n}\nshould change to\n\nelse\n{\nappendStringInfo(&querybuf, \"SELECT 1 FROM %s%s x\",\npk_only, pkrelname);\n}\n\n----\nIt would be better, we mention it somewhere:\nby default, you can only have a primary key(range_type[...],\nrange_type WITHOUT OVERLAPS).\n\npreceding without overlaps, all columns (in primary key) data types\nonly allowed range types.\n-------------------------------\nThe WITHOUT OVERLAPS value must be a range type and is used to\nconstrain the record's applicability to just that interval (usually a\nrange of dates or timestamps).\n\n\"interval\", I think \"period\" or \"range\" would be better. 
I am not sure\nwe need to mention \" must be a range type, not a multi range type\".\n---------------------------------------------\nI just `git apply`, then ran the test, and one test failed. Some minor\nchanges need to make the test pass.\n\n\n", "msg_date": "Mon, 25 Sep 2023 12:52:40 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 9/24/23 21:52, jian he wrote:\n> On Wed, Sep 20, 2023 at 10:50 AM Paul Jungwirth\n> <pj@illuminatedcomputing.com> wrote:\n>>\n>> On 9/17/23 20:11, jian he wrote:\n>>> small issues so far I found, v14.\n>>\n>> Thank you again for the review! v15 is attached.\n>>\n> \n> hi. some tiny issues.\n\nRebased v16 patches attached.\n\n> IN src/backend/utils/adt/ri_triggers.c\n> \n> else {\n> appendStringInfo(&querybuf, \"SELECT 1 FROM %s%s x\",\n> pk_only, pkrelname);\n> }\n> should change to\n> \n> else\n> {\n> appendStringInfo(&querybuf, \"SELECT 1 FROM %s%s x\",\n> pk_only, pkrelname);\n> }\n\nFixed.\n\n> It would be better, we mention it somewhere:\n> by default, you can only have a primary key(range_type[...],\n> range_type WITHOUT OVERLAPS).\n> \n> preceding without overlaps, all columns (in primary key) data types\n> only allowed range types.\n> -------------------------------\n> The WITHOUT OVERLAPS value must be a range type and is used to\n> constrain the record's applicability to just that interval (usually a\n> range of dates or timestamps).\n> \n> \"interval\", I think \"period\" or \"range\" would be better. I am not sure\n> we need to mention \" must be a range type, not a multi range type\".\n\nI reworked those two paragraphs to incorporate those suggestions and \nhopefully clarify the idea bit further. (I'll revise these again once I \nsupport multiple WITHOUT OVERLAPS columns.)\n\n> I just `git apply`, then ran the test, and one test failed. Some minor\n> changes need to make the test pass.\n\nI couldn't reproduce this. 
If you're still seeing a failure please let \nme know what you're seeing.\n\nThese patches also fix a problem I found with FKs when used with \nbtree_gist. Privately I'm using the script below [1] to re-run all my \ntests with that extension and int+range columns. I'd like to add \nsomething similar to contrib/btree_gist. I'm open to advice how best to \ndo that if anyone has any!\n\n[1] #!/bin/bash\nset -eu\n\n# without_overlaps\n\ncat ../src/test/regress/sql/without_overlaps.sql | \\\n sed -E 's/int4range/integer/g' | \\\n sed -E 's/valid_at integer/valid_at int4range/' | \\\n sed -E 's/int8range/bigint/g' | \\\n sed -E 's/'\"'\"'\\[(-?[[:digit:]]+),\\1\\]'\"'\"'/\\1/g' | \\\n cat > ./sql/without_overlaps.sql\n\ncat ../src/test/regress/expected/without_overlaps.out | \\\n sed -E 's/int4range/integer/g' | \\\n sed -E 's/valid_at integer/valid_at int4range/' | \\\n sed -E 's/incompatible types: integer and tsrange/incompatible types: \nint4range and tsrange/' | \\\n sed -E 's/int8range/bigint/g' | \\\n sed -E 's/'\"'\"'\\[(-?[[:digit:]]+),\\1\\]'\"'\"'/\\1/g' | \\\n sed -E 's/'\"'\"'\\[(-?[[:digit:]]+),-?[[:digit:]]+\\)'\"'\"'/\\1/g' | \\\n sed -E 's/\\[(-?[[:digit:]]+),\\1\\]/\\1/g' | \\\n sed -E 's/\\[(-?[[:digit:]]+),-?[[:digit:]]+\\)/\\1/g' | \\\n sed -E 'N;P;s/^ +id [^\\n]+\\n-+(\\+.*)$/----\\1/p;D' | \\\n sed -E \n's/^----------\\+-----------\\+-----------\\+----------\\+---------$/----------+---------+-----------+----------+---------/' \n| \\\n sed -E \n's/^----\\+-------------------------\\+--------\\+-------$/----+-------------------------+-----+-------/' \n| \\\n cat > ./expected/without_overlaps.out\n\n# for_portion_of\n\ncat ../src/test/regress/sql/for_portion_of.sql | \\\n sed -E 's/int4range/integer/g' | \\\n sed -E 's/valid_at integer/valid_at int4range/' | \\\n sed -E 's/'\"'\"'\\[(-?[[:digit:]]+),\\1\\]'\"'\"'/\\1/g' | \\\n cat > ./sql/for_portion_of.sql\n\ncat ../src/test/regress/expected/for_portion_of.out | \\\n sed -E 's/int4range/integer/g' | 
\\\n sed -E 's/valid_at integer/valid_at int4range/' | \\\n sed -E 's/'\"'\"'\\[(-?[[:digit:]]+),\\1\\]'\"'\"'/\\1/g' | \\\n sed -E 's/'\"'\"'\\[(-?[[:digit:]]+),-?[[:digit:]]+\\)'\"'\"'/\\1/g' | \\\n sed -E 's/\\[(-?[[:digit:]]+),\\1\\]/\\1/g' | \\\n sed -E 's/\\[(-?[[:digit:]]+),-?[[:digit:]]+\\)/\\1/g' | \\\n sed -E 'N;P;s/^ +id [^\\n]+\\n-+(\\+.*)$/----\\1/p;D' | \\\n cat > ./expected/for_portion_of.out\n\nRegards,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Mon, 25 Sep 2023 13:20:59 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 25.09.23 21:20, Paul Jungwirth wrote:\n> On 9/24/23 21:52, jian he wrote:\n>> On Wed, Sep 20, 2023 at 10:50 AM Paul Jungwirth\n>> <pj@illuminatedcomputing.com> wrote:\n>>>\n>>> On 9/17/23 20:11, jian he wrote:\n>>>> small issues so far I found, v14.\n>>>\n>>> Thank you again for the review! v15 is attached.\n>>>\n>>\n>> hi. some tiny issues.\n> \n> Rebased v16 patches attached.\n\nLooking through the tests in v16-0001:\n\n+-- PK with no columns just WITHOUT OVERLAPS:\n+CREATE TABLE temporal_rng (\n+ valid_at tsrange,\n+ CONSTRAINT temporal_rng_pk PRIMARY KEY (valid_at WITHOUT OVERLAPS)\n+);\n+ERROR: syntax error at or near \"WITHOUT\"\n+LINE 3: CONSTRAINT temporal_rng_pk PRIMARY KEY (valid_at WITHOUT OV...\n+ ^\n\nI think this error is confusing. The SQL standard requires at least one \nnon-period column in a PK. I don't know why that is or why we should \nimplement it. 
But if we want to implement it, maybe we should enforce \nthat in parse analysis rather than directly in the parser, to be able to \nproduce a more friendly error message.\n\n+-- PK with a range column/PERIOD that isn't there:\n+CREATE TABLE temporal_rng (\n+ id INTEGER,\n+ CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT \nOVERLAPS)\n+);\n+ERROR: range or PERIOD \"valid_at\" in WITHOUT OVERLAPS does not exist\n\nI think here we should just produce a \"column doesn't exist\" error \nmessage, the same as if the \"id\" column was invalid. We don't need to \nget into the details of what kind of column it should be. That is done \nin the next test\n\n+ERROR: column \"valid_at\" in WITHOUT OVERLAPS is not a range type\n\nAlso, in any case it would be nice to have a location pointer here (for \nboth cases).\n\n+-- PK with one column plus a range:\n+CREATE TABLE temporal_rng (\n+ -- Since we can't depend on having btree_gist here,\n+ -- use an int4range instead of an int.\n+ -- (The rangetypes regression test uses the same trick.)\n+ id int4range,\n+ valid_at tsrange,\n+ CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT \nOVERLAPS)\n+);\n\nI'm confused why you are using int4range here (and in further tests) for \nthe scalar (non-range) part of the primary key. Wouldn't a plaint int4 \nserve here?\n\n+SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE \nconname = 'temporal_rng_pk';\n+ pg_get_indexdef\n+-------------------------------------------------------------------------------\n+ CREATE UNIQUE INDEX temporal_rng_pk ON temporal_rng USING gist (id, \nvalid_at)\n\nShouldn't this somehow show the operator classes for the columns? 
We \nare using different operator classes for the id and valid_at columns, \naren't we?\n\n+-- PK with USING INDEX (not possible):\n+CREATE TABLE temporal3 (\n+ id int4range,\n+ valid_at tsrange\n+);\n+CREATE INDEX idx_temporal3_uq ON temporal3 USING gist (id, valid_at);\n+ALTER TABLE temporal3\n+ ADD CONSTRAINT temporal3_pk\n+ PRIMARY KEY USING INDEX idx_temporal3_uq;\n+ERROR: \"idx_temporal3_uq\" is not a unique index\n+LINE 2: ADD CONSTRAINT temporal3_pk\n+ ^\n+DETAIL: Cannot create a primary key or unique constraint using such an \nindex.\n\nCould you also add a test where the index is unique and the whole thing \ndoes work?\n\n\nApart from the tests, how about renaming the column \npg_constraint.contemporal to something like to conwithoutoverlaps?\n\n\n\n", "msg_date": "Mon, 25 Sep 2023 22:00:30 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hi Peter et al,\n\nOn 9/1/23 12:56, Paul Jungwirth wrote:\n>> On 9/1/23 11:30, Peter Eisentraut wrote:\n>>> I think the WITHOUT OVERLAPS clause should be per-column, so that \n>>> something like UNIQUE (a WITHOUT OVERLAPS, b, c WITHOUT OVERLAPS) \n>>> would be possible.  Then the WITHOUT OVERLAPS clause would directly \n>>> correspond to the choice between equality or overlaps operator per \n>>> column.\n> I think allowing multiple uses of `WITHOUT OVERLAPS` (and in any \n> position) is a great recommendation that enables a lot of new \n> functionality.\n\nI've been working on implementing this, but I've come to think it is the \nwrong way to go.\n\nIf we support this in primary key and unique constraints, then we must \nalso support it for foreign keys and UPDATE/DELETE FOR PORTION OF. But \nimplementing that logic is pretty tricky. For example take a foreign key \non (id, PERIOD valid_at, PERIOD asserted_at). 
We need to ensure the \nreferenced two-dimensional time space `contains` the referencing \ntwo-dimensional space. You can visualize a rectangle in two-dimensional \nspace for each referencing record (which we validate one at a time). The \nreferenced records must be aggregated and so form a polygon (of all \nright angles). For example the referencing record may be (1, [0,2), \n[0,2)) with referenced records of (1, [0,2), [0,1)) and (1, [0,1), \n[1,2)). (I'm using intranges since they're easier to read, but you could \nimagine these as dateranges like [2000-01-01,2002-01-01).) Now the \nrange_agg of their valid_ats is [0,2) and of their asserted_ats is \n[0,2). But the referenced 2d space still doesn't contain the referencing \nspace. It's got one corner missing. This is a well-known problem among \ngame developers. We're lucky not to have arbitrary polygons, but it's \nstill a tough issue.\n\nBesides `contains` we also need to compute `overlaps` and `intersects` \nto support these temporal features. Implementing that for 2d, 3d, etc \nlooks very complicated, for something that is far outside the normal use \ncase and also not part of the standard. It will cost a little \nperformance for the normal 1d use case too.\n\nI think a better approach (which I want to attempt as an add-on patch, \nnot in this main series) is to support not just range types, but any \ntype with the necessary operators. Then you could have an mdrange \n(multi-dimensional range) or potentially even an arbitrary n-dimensional \npolygon. (PostGIS has something like this, but its `contains` operator \ncompares (non-concave) *bounding boxes*, so it would not work for the \nexample above. Still the similarity between temporal and spatial data is \nstriking. I'm going to see if I can get some input from PostGIS folks \nabout how useful any of this is to them.) This approach would also let \nus use multiranges: not for multiple dimensions, but for non-contiguous \ntime spans stored in a single row. 
This puts the complexity in the types \nthemselves (which seems more appropriate) and is ultimately more \nflexible (supporting not just mdrange but also multirange, and other \nthings too).\n\nThis approach also means that instead of storing a mask/list of which \ncolumns use WITHOUT OVERLAPS, I can just store one attnum. Again, this \nsaves the common use-case from paying a performance penalty to support a \nmuch rarer one.\n\nI've still got my multi-WITHOUT OVERLAPS work, but I'm going to switch \ngears to what I've described here. Please let me know if you disagree!\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Tue, 10 Oct 2023 21:22:35 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 9/25/23 14:00, Peter Eisentraut wrote:\n> Looking through the tests in v16-0001:\n> \n> +-- PK with no columns just WITHOUT OVERLAPS:\n> +CREATE TABLE temporal_rng (\n> +       valid_at tsrange,\n> +       CONSTRAINT temporal_rng_pk PRIMARY KEY (valid_at WITHOUT OVERLAPS)\n> +);\n> +ERROR:  syntax error at or near \"WITHOUT\"\n> +LINE 3:  CONSTRAINT temporal_rng_pk PRIMARY KEY (valid_at WITHOUT OV...\n> +                                                          ^\n> \n> I think this error is confusing.  The SQL standard requires at least one \n> non-period column in a PK.  I don't know why that is or why we should \n> implement it.  But if we want to implement it, maybe we should enforce \n> that in parse analysis rather than directly in the parser, to be able to \n> produce a more friendly error message.\n\nOkay.\n\n(I think the reason the standard requires one non-period column is to \nidentify the \"entity\". If philosophically the row is an Aristotelian \nproposition about that thing, the period qualifies it as true just \nduring some time span. 
So the scalar part is doing the work that a PK \nconventionally does, and the period part does something else. Perhaps a \nPK/UNIQUE constraint with no scalar part would still be useful, but not \nvery often I think, and I'm not sure it makes sense to call it PK/UNIQUE.)\n\n> +-- PK with a range column/PERIOD that isn't there:\n> +CREATE TABLE temporal_rng (\n> +       id INTEGER,\n> +       CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT \n> OVERLAPS)\n> +);\n> +ERROR:  range or PERIOD \"valid_at\" in WITHOUT OVERLAPS does not exist\n> \n> I think here we should just produce a \"column doesn't exist\" error \n> message, the same as if the \"id\" column was invalid.  We don't need to \n> get into the details of what kind of column it should be.  That is done \n> in the next test\n\nI'll change it. The reason for the different wording is that it might \nnot be a column at all. It might be a PERIOD. So what about just \"column \nor PERIOD doesn't exist\"? (Your suggestion is fine too though.)\n\n> +ERROR:  column \"valid_at\" in WITHOUT OVERLAPS is not a range type\n> \n> Also, in any case it would be nice to have a location pointer here (for \n> both cases).\n\nAgreed.\n\n> +-- PK with one column plus a range:\n> +CREATE TABLE temporal_rng (\n> +       -- Since we can't depend on having btree_gist here,\n> +       -- use an int4range instead of an int.\n> +       -- (The rangetypes regression test uses the same trick.)\n> +       id int4range,\n> +       valid_at tsrange,\n> +       CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT \n> OVERLAPS)\n> +);\n> \n> I'm confused why you are using int4range here (and in further tests) for \n> the scalar (non-range) part of the primary key.  Wouldn't a plaint int4 \n> serve here?\n\nA plain int4 would be better, and it would match the normal use-case, \nbut you must have btree_gist to create an index like that, and the \nregress tests can't assume we have that. 
Here is the part from \nsql/rangetypes.sql I'm referring to:\n\n--\n-- Btree_gist is not included by default, so to test exclusion\n-- constraints with range types, use singleton int ranges for the \"=\"\n-- portion of the constraint.\n--\n\ncreate table test_range_excl(\n room int4range,\n speaker int4range,\n during tsrange,\n exclude using gist (room with =, during with &&),\n exclude using gist (speaker with =, during with &&)\n);\n\n> +SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE \n> conname = 'temporal_rng_pk';\n> +                                pg_get_indexdef\n> +-------------------------------------------------------------------------------\n> + CREATE UNIQUE INDEX temporal_rng_pk ON temporal_rng USING gist (id, \n> valid_at)\n> \n> Shouldn't this somehow show the operator classes for the columns?  We \n> are using different operator classes for the id and valid_at columns, \n> aren't we?\n\nWe only print the operator classes if they are not the default, so they \ndon't appear here.\n\nI do suspect something more is desirable though. For exclusion \nconstraints we replace everything before the columns with just \"EXCLUDE \nUSING gist\". I could embed WITHOUT OVERLAPS but it's not valid syntax in \nCREATE INDEX. 
Let me know if you have any ideas.\n\n> +-- PK with USING INDEX (not possible):\n> +CREATE TABLE temporal3 (\n> +       id int4range,\n> +       valid_at tsrange\n> +);\n> +CREATE INDEX idx_temporal3_uq ON temporal3 USING gist (id, valid_at);\n> +ALTER TABLE temporal3\n> +       ADD CONSTRAINT temporal3_pk\n> +       PRIMARY KEY USING INDEX idx_temporal3_uq;\n> +ERROR:  \"idx_temporal3_uq\" is not a unique index\n> +LINE 2:  ADD CONSTRAINT temporal3_pk\n> +             ^\n> +DETAIL:  Cannot create a primary key or unique constraint using such an \n> index.\n> \n> Could you also add a test where the index is unique and the whole thing \n> does work?\n\nNo problem!\n\n> Apart from the tests, how about renaming the column \n> pg_constraint.contemporal to something like to conwithoutoverlaps?\n\nIs that too verbose? I've got some code already changing it to \nconoverlaps but I'm probably happier with conwithoutoverlaps, assuming \nno one else minds it.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Tue, 10 Oct 2023 21:47:01 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 10/11/23 05:47, Paul Jungwirth wrote:\n>> +SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE \n>> conname = 'temporal_rng_pk';\n>> +                                pg_get_indexdef\n>> +-------------------------------------------------------------------------------\n>> + CREATE UNIQUE INDEX temporal_rng_pk ON temporal_rng USING gist (id, \n>> valid_at)\n>>\n>> Shouldn't this somehow show the operator classes for the columns?  We \n>> are using different operator classes for the id and valid_at columns, \n>> aren't we?\n> \n> We only print the operator classes if they are not the default, so they \n> don't appear here.\n> \n> I do suspect something more is desirable though. 
For exclusion \n> constraints we replace everything before the columns with just \"EXCLUDE \n> USING gist\". I could embed WITHOUT OVERLAPS but it's not valid syntax in \n> CREATE INDEX. Let me know if you have any ideas.\n\nWhy not? The standard does not mention indexes (although some \ndiscussions last week might change that) so we can change the syntax for \nit as we wish. Doing so would also allow us to use ALTER TABLE ... \nUSING INDEX for such things.\n-- \nVik Fearing\n\n\n\n", "msg_date": "Fri, 13 Oct 2023 02:48:28 +0200", "msg_from": "Vik Fearing <vik@postgresfriends.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, Sep 26, 2023 at 4:21 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 9/24/23 21:52, jian he wrote:\n> > On Wed, Sep 20, 2023 at 10:50 AM Paul Jungwirth\n> > <pj@illuminatedcomputing.com> wrote:\n> >>\n> >> On 9/17/23 20:11, jian he wrote:\n> >>> small issues so far I found, v14.\n> >>\n> >> Thank you again for the review! v15 is attached.\n> >>\n> >\n> > hi. some tiny issues.\n>\n> Rebased v16 patches attached.\n\nCan you rebase it?\nchanges in\nhttps://git.postgresql.org/cgit/postgresql.git/log/src/backend/executor/nodeModifyTable.c\nhttps://git.postgresql.org/cgit/postgresql.git/log/src/backend/commands/tablecmds.c\nmake it no longer applicable.\n\nI try to manually edit the patch to make it applicable.\nbut failed at tablecmds.c\n\n\n", "msg_date": "Mon, 16 Oct 2023 12:33:10 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hi.\nbased on v16.\n\n/* Look up the FOR PORTION OF name requested. 
*/\nrange_attno = attnameAttNum(targetrel, range_name, false);\nif (range_attno == InvalidAttrNumber)\nereport(ERROR,\n(errcode(ERRCODE_UNDEFINED_COLUMN),\nerrmsg(\"column or period \\\"%s\\\" of relation \\\"%s\\\" does not exist\",\nrange_name,\nRelationGetRelationName(targetrel)),\nparser_errposition(pstate, forPortionOf->range_name_location)));\nattr = TupleDescAttr(targetrel->rd_att, range_attno - 1);\n// TODO: check attr->attisdropped (?),\n// and figure out concurrency issues with that in general.\n// It should work the same as updating any other column.\n\nI don't think we need to check attr->attisdropped here.\nbecause the above function attnameAttNum already does the job.\n--------------------------------------------\nbool\nget_typname_and_namespace(Oid typid, char **typname, char **typnamespace)\n{\nHeapTuple tp;\n\ntp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));\nif (HeapTupleIsValid(tp))\n{\nForm_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);\n\n*typname = pstrdup(NameStr(typtup->typname));\n*typnamespace = get_namespace_name(typtup->typnamespace);\nReleaseSysCache(tp);\nreturn *typnamespace;\n\n\"return *typnamespace;\" should be \"return true\"?\nMaybe name it to get_typname_and_typnamespace?\n-----------------------------------------------------------------------\nif (!get_typname_and_namespace(attr->atttypid, &range_type_name,\n&range_type_namespace))\nelog(ERROR, \"missing range type %d\", attr->atttypid);\n\nyou can just `elog(ERROR, \"missing range type %s\", range_type_name);` ?\nAlso, this should be placed just below if (!type_is_range(attr->atttypid))?\n-----------------------------------------------------------------------\nsrc/backend/catalog/objectaddress.c\n\nif (OidIsValid(per->perrelid))\n{\nStringInfoData rel;\n\ninitStringInfo(&rel);\ngetRelationDescription(&rel, per->perrelid, false);\nappendStringInfo(&buffer, _(\"period %s on %s\"),\nNameStr(per->pername), rel.data);\npfree(rel.data);\n}\nelse\n{\nappendStringInfo(&buffer, 
_(\"period %s\"),\nNameStr(per->pername));\n}\n\nperiods are always associated with the table, is the above else branch correct?\n-----------------------------------------------------------------------\nFile: src/backend/commands/tablecmds.c\n7899: /*\n7900: * this test is deliberately not attisdropped-aware, since if one tries to\n7901: * add a column matching a dropped column name, it's gonna fail anyway.\n7902: *\n7903: * XXX: Does this hold for periods?\n7904: */\n7905: attTuple = SearchSysCache2(ATTNAME,\n7906: ObjectIdGetDatum(RelationGetRelid(rel)),\n7907: PointerGetDatum(pername));\n\nXXX: Does this hold for periods?\nYes. we can add the following 2 sql for code coverage.\nalter table pt add period for tableoid (ds, de);\nalter table pt add period for \"........pg.dropped.4........\" (ds, de);\n\n\n", "msg_date": "Fri, 20 Oct 2023 20:45:05 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "hi. 
also based on v16.\n-----------------tests.\ndrop table if exists for_portion_of_test1;\nCREATE unlogged TABLE for_portion_of_test1 (id int4range, valid_at\ntsrange,name text );\nINSERT INTO for_portion_of_test1 VALUES ('[1,1]', NULL,\n'[1,1]_NULL'),('[1,1]', '(,)', '()_[1,]')\n,('[1,1]', 'empty', '[1,1]_empty'),(NULL,NULL, NULL), (nuLL,\n'(2018-01-01,2019-01-01)','misc');\n--1\nUPDATE for_portion_of_test1 FOR PORTION OF valid_at FROM NULL TO NULL\nSET name = 'for_portition_NULLtoNULL';\nselect * from for_portion_of_test1;\n--2\nUPDATE for_portion_of_test1 FOR PORTION OF valid_at FROM null TO\nUNBOUNDED SET name = 'NULL_TO_UNBOUNDED';\nselect * from for_portion_of_test1;\n--3\nUPDATE for_portion_of_test1 FOR PORTION OF valid_at FROM UNBOUNDED TO\nnull SET name = 'UNBOUNDED__TO_NULL';\nselect * from for_portion_of_test1;\n--4\nUPDATE for_portion_of_test1 FOR PORTION OF valid_at FROM UNBOUNDED TO\nUNBOUNDED SET name = 'UNBOUNDED__TO_UNBOUNDED';\nselect * from for_portion_of_test1;\n------------------------\nFile: /src/backend/executor/nodeModifyTable.c\n1277: oldRange = slot_getattr(oldtupleSlot,\nforPortionOf->rangeVar->varattno, &isNull);\n1278:\n1279: if (isNull)\n1280: elog(ERROR, \"found a NULL range in a temporal table\");\n1281: oldRangeType = DatumGetRangeTypeP(oldRange);\n\nI wonder when this isNull will be invoked. the above tests won't\ninvoke the error.\nalso the above test, NULL seems equivalent to unbounded. FOR PORTION\nOF \"from\" and \"to\" both bound should not be null?\n\nwhich means the following code does not work as intended? 
I also\ncannot find a way to invoke the following elog error branch.\nFile:src/backend/executor/nodeModifyTable.c\n4458: exprState = ExecPrepareExpr((Expr *) forPortionOf->targetRange, estate);\n4459: targetRange = ExecEvalExpr(exprState, econtext, &isNull);\n4460: if (isNull)\n4461: elog(ERROR, \"Got a NULL FOR PORTION OF target range\");\n\n---------------------------\ni also made some changes in the function range_leftover_internal,\nExecForPortionOfLeftovers.\nplease see the attached patch.", "msg_date": "Mon, 23 Oct 2023 08:00:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, Oct 11, 2023 at 12:47 PM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 9/25/23 14:00, Peter Eisentraut wrote:\n> > Looking through the tests in v16-0001:\n> >\n> > +-- PK with no columns just WITHOUT OVERLAPS:\n> > +CREATE TABLE temporal_rng (\n> > + valid_at tsrange,\n> > + CONSTRAINT temporal_rng_pk PRIMARY KEY (valid_at WITHOUT OVERLAPS)\n> > +);\n> > +ERROR: syntax error at or near \"WITHOUT\"\n> > +LINE 3: CONSTRAINT temporal_rng_pk PRIMARY KEY (valid_at WITHOUT OV...\n> > + ^\n> >\n> > I think this error is confusing. The SQL standard requires at least one\n> > non-period column in a PK. I don't know why that is or why we should\n> > implement it. But if we want to implement it, maybe we should enforce\n> > that in parse analysis rather than directly in the parser, to be able to\n> > produce a more friendly error message.\n>\n> Okay.\n>\n> (I think the reason the standard requires one non-period column is to\n> identify the \"entity\". If philosophically the row is an Aristotelian\n> proposition about that thing, the period qualifies it as true just\n> during some time span. So the scalar part is doing the work that a PK\n> conventionally does, and the period part does something else. 
Perhaps a\n> PK/UNIQUE constraint with no scalar part would still be useful, but not\n> very often I think, and I'm not sure it makes sense to call it PK/UNIQUE.)\n>\n> > +-- PK with a range column/PERIOD that isn't there:\n> > +CREATE TABLE temporal_rng (\n> > + id INTEGER,\n> > + CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT\n> > OVERLAPS)\n> > +);\n> > +ERROR: range or PERIOD \"valid_at\" in WITHOUT OVERLAPS does not exist\n> >\n> > I think here we should just produce a \"column doesn't exist\" error\n> > message, the same as if the \"id\" column was invalid. We don't need to\n> > get into the details of what kind of column it should be. That is done\n> > in the next test\n>\n> I'll change it. The reason for the different wording is that it might\n> not be a column at all. It might be a PERIOD. So what about just \"column\n> or PERIOD doesn't exist\"? (Your suggestion is fine too though.)\n>\n> > +ERROR: column \"valid_at\" in WITHOUT OVERLAPS is not a range type\n> >\n> > Also, in any case it would be nice to have a location pointer here (for\n> > both cases).\n>\n> Agreed.\n>\n\nI refactored findNeworOldColumn to better handle error reports.\nplease check the attached.", "msg_date": "Wed, 25 Oct 2023 14:14:34 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "V16 patch doc/src/sgml/html/sql-createtable.html doc SET NULL description:\n`\nSET NULL [ ( column_name [, ... ] ) ]\nSet all of the referencing columns, or a specified subset of the\nreferencing columns, to null. A subset of columns can only be\nspecified for ON DELETE actions.\nIn a temporal foreign key, the change will use FOR PORTION OF\nsemantics to constrain the effect to the bounds of the referenced row.\n`\n\nI think it means, if the foreign key has PERIOD column[s], then the\nPERIOD column[s] will not be set to NULL in {ON DELETE|ON UPDATE}. 
We\ncan also use FOR PORTION OF semantics to constrain the effect to the\nbounds of the referenced row.\nsee below demo:\n\n\nBEGIN;\ndrop table if exists temporal_rng CASCADE;\ndrop table if exists temporal_fk_rng2rng CASCADE;\nCREATE unlogged TABLE temporal_rng (id int4range,valid_at tsrange);\nALTER TABLE temporal_rng ADD CONSTRAINT temporal_rng_pk PRIMARY KEY\n(id, valid_at WITHOUT OVERLAPS);\nCREATE unlogged TABLE temporal_fk_rng2rng (id int4range,valid_at\ntsrange,parent_id int4range,\nCONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng (id, PERIOD valid_at) on update set null ON\nDELETE SET NULL);\n\nINSERT INTO temporal_rng VALUES ('[11,11]', tsrange('2018-01-01',\n'2021-01-01'));\nINSERT INTO temporal_fk_rng2rng VALUES ('[7,7]', tsrange('2018-01-01',\n'2021-01-01'), '[11,11]');\nDELETE FROM temporal_rng WHERE id = '[11,11]';\ntable temporal_fk_rng2rng;\ncommit;\n-----------------------------------------------------\nalso\n\"REFERENCES temporal_rng (id, PERIOD valid_at) ON UPDATE SET NULL ON\nDELETE SET NULL)\"\nis the same as\n\"REFERENCES temporal_rng (id, PERIOD valid_at) ON UPDATE SET NULL ON\nDELETE SET NULL (parent_id)\"\nin the current implementation.\nwe might need to change the pg_constraint column \"confdelsetcols\" description.\n-------\nthe above also applies to SET DEFAULT.\n\n--------------------------------------------------------------------------------------------------------------------------\ncan you add the following for the sake of code coverage. 
I think\nsrc/test/regress/sql/without_overlaps.sql can be simplified.\n\n--- common template for test foreign key constraint.\nCREATE OR REPLACE PROCEDURE overlap_template()\nLANGUAGE SQL\nAS $$\nDROP TABLE IF EXISTS temporal_rng CASCADE;\nDROP TABLE IF EXISTS temporal_fk_rng2rng CASCADE;\nCREATE UNLOGGED TABLE temporal_rng (id int4range,valid_at tsrange);\nALTER TABLE temporal_rng\nADD CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);\nCREATE UNLOGGED TABLE temporal_fk_rng2rng (\nid int4range,\nvalid_at tsrange,\nparent_id int4range,\nCONSTRAINT temporal_fk_rng2rng_fk\nFOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng (id, PERIOD valid_at)\nON UPDATE no action ON DELETE no action\nDEFERRABLE\n);\n$$;\ncall overlap_template();\n\n--- on update/delete restrict\n-- coverage for TRI_FKey_restrict_upd,TRI_FKey_restrict_del.\nBEGIN;\nALTER TABLE temporal_fk_rng2rng\nDROP CONSTRAINT temporal_fk_rng2rng_fk,\nADD CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng(id,PERIOD valid_at) ON UPDATE RESTRICT ON\nDELETE RESTRICT;\n\nINSERT INTO temporal_rng VALUES ('[11,11]', tsrange('2018-01-01',\n'2021-01-01'));\nINSERT INTO temporal_fk_rng2rng VALUES ('[7,7]', tsrange('2018-01-01',\n'2020-01-01'), '[11,11]');\nsavepoint s;\n\nUPDATE temporal_rng FOR PORTION OF valid_at FROM '2018-01-01' TO '2018-01-03'\nSET id = '[9,9]' WHERE id = '[11,11]';\nROLLBACK to s;\ndelete from temporal_rng FOR PORTION OF valid_at FROM '2018-01-01' TO\n'2020-01-01';\nROLLBACK to s;\n--this one should not have error.\ndelete from temporal_rng FOR PORTION OF valid_at FROM '2020-01-01' TO\n'2021-01-01';\ntable temporal_rng;\nROLLBACK;\n\n-------------\n--- on delete set column list coverage for function tri_set. 
branch\n{if (riinfo->ndelsetcols != 0)}\nBEGIN;\nALTER TABLE temporal_fk_rng2rng\nDROP CONSTRAINT temporal_fk_rng2rng_fk,\nADD CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng(id,PERIOD valid_at) ON DELETE set default(parent_id);\n\nALTER TABLE temporal_fk_rng2rng ALTER COLUMN parent_id SET DEFAULT '[2,2]';\nALTER TABLE temporal_fk_rng2rng ALTER COLUMN valid_at SET DEFAULT tsrange'(,)';\nINSERT INTO temporal_rng VALUES ('[11,11]', tsrange('2018-01-01',\n'2021-01-01'));\nINSERT INTO temporal_fk_rng2rng VALUES ('[7,7]', tsrange('2018-01-01',\n'2020-01-01'), '[11,11]');\ninsert into temporal_rng values('[2,2]','(,)');\nsavepoint s;\ndelete from temporal_rng FOR PORTION OF valid_at FROM '2018-01-01' TO\n'2019-01-01' where id = '[11,11]';\n-- delete from temporal_rng where id = '[11,11]';\ntable temporal_fk_rng2rng;\nrollback;\n\n\n", "msg_date": "Sat, 28 Oct 2023 16:25:57 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "hi.\n\n* The attached patch makes foreign keys with PERIOD fail if any of the\nforeign key columns is \"generated columns\".\n\n* The following queries will cause segmentation fault. not sure the\nbest way to fix it. 
the reason\nin LINE: numpks = transformColumnNameList(RelationGetRelid(pkrel),\nfkconstraint->pk_attrs, pkattnum, pktypoid);\nbegin;\ndrop table if exists temporal3,temporal_fk_rng2rng;\nCREATE TABLE temporal3 (id int4range,valid_at tsrange,\nCONSTRAINT temporal3_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS));\nCREATE TABLE temporal_fk_rng2rng (\nid int4range,valid_at tsrange,parent_id int4range,\nCONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),\nCONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal3 (id, valid_at)\n);\n\n* change the function FindFKComparisonOperators's \"eqstrategy\" to\nmake pg_constraint record correct {conpfeqop,conppeqop,conffeqop}.\n\n* fix the ON DELETE SET NULL/DEFAULT (columnlist). Now the following\nqueries error will be more consistent.\nALTER TABLE temporal_fk_rng2rng DROP CONSTRAINT temporal_fk_rng2rng_fk,\nADD CONSTRAINT temporal_fk_rng2rng_fk\nFOREIGN KEY (parent_id, PERIOD valid_at) REFERENCES temporal_rng\nON DELETE SET DEFAULT(valid_at);\n--ON DELETE SET NULL(valid_at);\n\n* refactor restrict_cascading_range function.\n\n* you did if (numfks != numpks) before if (is_temporal) {numfks +=\n1;}, So I changed the code order to make the error report more\nconsistent.\n\nanyway, I put it in one patch. please check the attached.", "msg_date": "Mon, 30 Oct 2023 08:00:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Thanks for all the feedback! 
Consolidating several emails below:\n\n > On Fri, Oct 20, 2023 at 5:45 AM jian he <jian.universality@gmail.com> \nwrote:\n > I don't think we need to check attr->attisdropped here\n\nChanged.\n\n > \"return *typnamespace;\" should be \"return true\"?\n\nNo, but I added a comment to clarify.\n\n > Maybe name it get_typname_and_typnamespace?\n\nI could go either way on this but I left it as-is since it seems \nredundant, and there are other functions here that don't repeat the \nthree-letter prefix.\n\n > you can just `elog(ERROR, \"missing range type %s\", range_type_name);` ?\n\nNo, because this failure happens trying to look up the name.\n\n > Also, this should be placed just below if \n(!type_is_range(attr->atttypid))?\n\nWe ereport there (not elog) because it's a user error (using a \nnon-rangetype for the option), not an internal error.\n\n > periods are always associated with the table, is the above else \nbranch correct?\n\nTrue but I'm following the code just above for OCLASS_CONSTRAINT. Even \nif this case is unexpected, it seems better to handle it gracefully than \nhave a harder failure.\n\n > XXX: Does this hold for periods?\n > Yes. we can add the following 2 sql for code coverage.\n > alter table pt add period for tableoid (ds, de);\n > alter table pt add period for \"........pg.dropped.4........\" (ds, de);\n\nAdded, thanks!\n\n > On Sun, Oct 22, 2023 at 5:01 PM jian he <jian.universality@gmail.com> \nwrote:\n > drop table if exists for_portion_of_test1;\n > CREATE unlogged TABLE for_portion_of_test1 (id int4range, valid_at\n > tsrange,name text );\n > ...\n\nThese are good tests, thanks! 
Originally FOR PORTION OF required a \nPRIMARY KEY or UNIQUE constraint, so we couldn't find NULLs here, but we \nchanged that a while back, so it's good to verify it handles that case.\n\n > 1279: if (isNull)\n > 1280: elog(ERROR, \"found a NULL range in a temporal table\");\n > 1281: oldRangeType = DatumGetRangeTypeP(oldRange);\n >\n > I wonder when this isNull will be invoked. The above tests won't\n > invoke the error.\n\nAs far as I can tell it shouldn't happen, which is why it's elog. The \nnew tests don't hit it because a NULL range should never match the range \nin the FROM+TO of the FOR PORTION OF clause. Maybe this should even be \nan assert, but I think I prefer elog for the nicer error message and \nless-local condition.\n\n > also the above test, NULL seems equivalent to unbounded. FOR PORTION\n > OF \"from and \"to\" both bound should not be null?\n\nCorrect, NULL and UNBOUNDED mean the same thing. This matches the \nmeaning of NULL in ranges.\n\n > which means the following code does not work as intended? I also\n > cannot find a way to invoke the following elog error branch.\n > File:src/backend/executor/nodeModifyTable.c\n > 4458: exprState = ExecPrepareExpr((Expr *) forPortionOf->targetRange, \nestate);\n > 4459: targetRange = ExecEvalExpr(exprState, econtext, &isNull);\n > 4460: if (isNull)\n > 4461: elog(ERROR, \"Got a NULL FOR PORTION OF target range\");\n\nHere we're checking the \"target range\", in other words the range built \nfrom the FROM+TO of the FOR PORTION OF clause---not a range from a \ntuple. Finding a NULL here *for the range itself* would indeed be an \nerror. A NULL *bound* means \"unbounded\", but a NULL *range* should not \nbe possible to construct.\n\n > I also made some changes in the function range_leftover_internal,\n\nI'm not really comfortable with these changes. \"Leftover\" doesn't refer \nto \"left\" vs \"right\" but to what *remains* (what is \"left behind\") after \nthe UPDATE/DELETE. 
Also r1 and r2 are common parameter names throughout \nthe rangetypes.c file, and they are more general than the names you've \nsuggested. We shouldn't assume we will only ever call this function from \nthe FOR PORTION OF context.\n\n > ExecForPortionOfLeftovers\n\nThanks! I've made these code changes (with slight modifications, e.g. no \nneed to call ExecFetchSlotHeapTuple if there are no leftovers).\n\nI'm not sure about the comment change though---I want to verify that \nmyself (particularly the case when the partition key is updated so we \nhave already been routed to a different partition than the old tuple).\n\n > On Tue, Oct 24, 2023 at 11:14 PM jian he \n<jian.universality@gmail.com> wrote:\n > I refactored findNewOrOldColumn to better handle error reports.\n\nThanks, I like your changes here. Applied with some small adjustments.\n\n > On Sat, Oct 28, 2023 at 1:26 AM jian he <jian.universality@gmail.com> \nwrote:\n > I think it means, if the foreign key has PERIOD column[s], then the\n > PERIOD column[s] will not be set to NULL in {ON DELETE|ON UPDATE}. . . .\n\nI reworded this to explain that the PERIOD element will not be set to \nNULL (or the default value).\n\n > can you add the following for the sake of code coverage. I think\n > src/test/regress/sql/without_overlaps.sql can be simplified.\n > ...\n > call overlaps_template();\n\nI'm not sure I want to add indirection like this to the tests, which I \nthink makes them harder to read (and update). But there is indeed a \ntough combinatorial explosion, especially in the foreign key tests. We \nwant to cover {ON DELETE,ON UPDATE} {NO ACTION,RESTRICT,CASCADE,SET \nNULL,SET DEFAULT} when {child inserts,child updates,parent \nupdates,parent deletes} with {one,two} scalar columns and {,not} \npartitioned. Also ON DELETE SET {NULL,DEFAULT} against only a subset of \ncolumns. I updated the test cases to delete and re-use the same id \nvalues, so at least they are more isolated and thus easier to edit. 
I \nalso added tests for `(parent_id1, parent2, PERIOD valid_at)` cases as \nwell as `ON DELETE SET {NULL,DEFAULT} (parent_id1)`. (I think that last \ncase covers what you are trying to do here, but if I misunderstood \nplease let me know.)\n\nI haven't worked through your last email yet, but this seemed like \nenough changes to warrant an update.\n\nNew patches attached (rebased to 0bc726d9).\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Thu, 2 Nov 2023 13:21:37 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "hi. based on v17. I found several doc related issues. previously I\ndidn't look closely....\n\n+ </para>\n+ In a temporal foreign key, the delete/update will use\n+ <literal>FOR PORTION OF</literal> semantics to constrain the\n+ effect to the bounds being deleted/updated in the referenced row.\n+ </para>\n\nThe first \"para\" should be <para> ?\n---\nThere are many warnings after #define WRITE_READ_PARSE_PLAN_TREES\nsee: http://cfbot.cputube.org/highlights/all.html#4308\nDoes that mean oue new change in gram.y is somehow wrong?\n------\nsgml/html/sql-update.html:\n\"range_or_period_name\nThe range column or period to use when performing a temporal update.\nThis must match the range or period used in the table's temporal\nprimary key.\"\n\nIs the second sentence unnecessary? since no primary can still do \"for\nportion of update\".\n\nsgml/html/sql-update.html:\n\"start_time\nThe earliest time (inclusive) to change in a temporal update. This\nmust be a value matching the base type of the range or period from\nrange_or_period_name. It may also be the special value MINVALUE to\nindicate an update whose beginning is unbounded.\"\n\nprobably something like the following:\n\"lower_bound\"\nThe lower bound (inclusive) to change in an overlap update. 
This must\nbe a value matching the base type of the range or period from\nrange_or_period_name. It may also be the special value UNBOUNDED to\nindicate an update whose beginning is unbounded.\"\n\nObviously the \"start_time\" reference also needs to change, and the\nsql-delete.html reference also needs to change.\n----------------------------------\nUPDATE for_portion_of_test FOR PORTION OF valid_at FROM NULL TO\n\"unbounded\" SET name = 'NULL to NULL';\nshould fail, but not. double quoted unbounded is a column reference, I assume.\n\nThat's why I am confused with the function transformForPortionOfBound.\n\"if (nodeTag(n) == T_ColumnRef)\" part.\n-----------------------------------\nin create_table.sgml. you also need to add WITHOUT OVERLAPS related\ninfo into <varlistentry id=\"sql-createtable-parms-unique\">\n\n\n", "msg_date": "Tue, 7 Nov 2023 15:07:15 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 02.11.23 21:21, Paul Jungwirth wrote:\n> New patches attached (rebased to 0bc726d9).\n\nI went over the patch \nv17-0001-Add-temporal-PRIMARY-KEY-and-UNIQUE-constraints.patch in more \ndetail. Attached is a fixup patch that addresses a variety of cosmetic \nissues.\n\nSome details:\n\n- Renamed contemporal to conwithoutoverlaps, as previously discussed. \nAlso renamed various variables and function arguments similarly.\n\n- Rearranged text in CREATE TABLE reference page so there are no forward \nreferences. (Describe WITHOUT OVERLAPS under UNIQUE and then PRIMARY \nKEy says \"see above\", rather than describe it under PRIMARY KEY and have \nUNIQUE say \"see below.)\n\n- Removed various bits that related to temporal foreign keys, which \nbelong in a later patch.\n\n- Reverted some apparently unrelated changes in src/backend/catalog/index.c.\n\n- Removed the \"temporal UNIQUE\" constraint_type assignment in \nDefineIndex(). 
This is meant to be used in error messages and should \nrefer to actual syntax. I think it's fine without it this change.\n\n- Field contemporal in NewConstraint struct is not used by this patch.\n\n- Rearrange the grammar so that the rule with WITHOUT OVERLAPS is just a \nBoolean attribute rather than column name plus keywords. This was kind \nof confusing earlier and led to weird error messages for invalid syntax. \n I kept the restriction that you need at least one non-overlaps column, \nbut that is now enforced in parse analysis, not in the grammar. (But \nmaybe we don't need it?)\n\n(After your earlier explanation, I'm content to just allow one WITHOUT \nOVERLAPS column for now.)\n\n- Some places looked at conexclop to check whether something is a \nWITHOUT OVERLAPS constraint, instead of looking at conwithoutoverlaps \ndirectly.\n\n- Removed some redundant \"unlike\" entries in the pg_dump tests. (This \ncaused cfbot tests to fail.)\n\n- Moved the \"without_overlaps\" test later in the schedule. It should at \nleast be after \"constraints\" so that normal constraints are tested first.\n\n\nTwo areas that could be improved:\n\n1) In src/backend/commands/indexcmds.c, \nget_index_attr_temporal_operator() has this comment:\n\n+ * This seems like a hack\n+ * but I can't find any existing lookup function\n+ * that knows about pseudotypes.\n\nThis doesn't see very confident. ;-) I don't quite understand this. Is \nthis a gap in the currently available APIs, do we need to improve \nsomething here, or does this need more research?\n\n2) In src/backend/parser/parse_utilcmd.c, transformIndexConstraint(), \nthere is too much duplication between the normal and the if \n(constraint->without_overlaps) case, like the whole not-null constraints \nstuff at the end. This should be one code block with a few conditionals \ninside. Also, the normal case deals with things like table inheritance, \nwhich the added parts do not. 
Is this all complete?\n\nI'm not sure the validateWithoutOverlaps() function is needed at this \npoint in the code. We just need to check that the column exists, which \nthe normal code path already does, and then have the index creation code \nlater check that an appropriate overlaps operator exists. We don't even \nneed to restrict this to range types. Consider for example, it's \npossible that a type does not have a btree equality operator. We don't \ncheck that here either, but let the index code later check it.\n\n\nOverall, with these fixes, I think this patch is structurally okay. We \njust need to make sure we have all the weird corner cases covered.", "msg_date": "Thu, 9 Nov 2023 14:47:12 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 11/9/23 05:47, Peter Eisentraut wrote:\n> I went over the patch \n> v17-0001-Add-temporal-PRIMARY-KEY-and-UNIQUE-constraints.patch in more \n> detail\n\nThanks Peter!\n\nI'm about halfway through jian he's last two emails. I'll address your \nfeedback also. I wanted to reply to this without waiting though:\n\n> Overall, with these fixes, I think this patch is structurally okay.  We \n> just need to make sure we have all the weird corner cases covered.\n\nOne remaining issue I know about is with table partitions whose column \norder has changed. I've got an in-progress fix for that, but I've been \nprioritizing reviewer feedback the last few months. 
Just want to make \nsure you know about it for now.\n\nThanks!\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Thu, 9 Nov 2023 09:15:31 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "based on v17.\n\nbegin;\ndrop table if exists s1;\nCREATE TABLE s1 (id numrange, misc int, misc1 text);\ncreate role test101 login;\ngrant update, select on s1 to test101;\ninsert into s1 VALUES ('[1,1000]',2);\nset session authorization test101;\nupdate s1 set id = '[1,1000]';\nsavepoint sp1;\nupdate s1 FOR PORTION OF id from 10 to 100 set misc1 = 'test';\ntable s1;\nsavepoint sp2;\ninsert into s1 VALUES ('[2,1000]',12);\nrollback;\n\nIn UPDATE FOR PORTION OF from x to y, if range [x,y) overlaps with the\n\"source\" range\nthen the UPDATE action would be UPDATE and INSERT.\nThe above UPDATE FOR PORTION OF query should fail?\nUPDATE FOR PORTION OF, may need insert privilege. We also need to document this.\nSimilarly, we also need to apply the above logic to DELETE FOR PORTION OF.\n-------------------------------------------------------\n+ <para>\n+ If the table has a <link\nlinkend=\"ddl-periods-application-periods\">range column\n+ or <literal>PERIOD</literal></link>, you may supply a\n\nshould be\n\n+ <para>\n+ If the table has a range column or <link\nlinkend=\"ddl-periods-application-periods\">\n+ <literal>PERIOD</literal></link>, you may supply a\n\nsimilarly the doc/src/sgml/ref/delete.sgml the link reference also broken.\n--------------------------------------------------------\n <para>\n If the table has a range column or <link\nlinkend=\"ddl-periods-application-periods\">\n <literal>PERIOD</literal></link>, you may supply a\n <literal>FOR PORTION OF</literal> clause, and your update will only\naffect rows\n that overlap the given interval. 
Furthermore, if a row's span extends outside\n the <literal>FOR PORTION OF</literal> bounds, then it will be\ntruncated to fit\n within the bounds, and new rows spanning the \"cut off\" duration will be\n inserted to preserve the old values.\n </para>\n\n \"given interval\", \"cut off\" these words, imho, feel not so clear.\nWe also need a document that:\n \"UPDATE FOR PORTION OF\" is UPDATE and INSERT (if overlaps).\nIf the \"UPDATE FOR PORTION OF\" range overlaps then\nIt will invoke triggers in the following order: before row update,\nbefore row insert, after row insert. after row update.\n---------------------------------------\nsrc/test/regress/sql/for_portion_of.sql\nYou only need to create two triggers?\nsince for_portion_of_trigger only raises notice to output the triggers\nmeta info.\n\nCREATE TRIGGER trg_for_portion_of_before\n BEFORE INSERT OR UPDATE OR DELETE ON for_portion_of_test\n FOR EACH ROW\n EXECUTE FUNCTION for_portion_of_trigger();\nCREATE TRIGGER trg_for_portion_of_after\nAFTER INSERT OR UPDATE OR DELETE ON for_portion_of_test\nFOR EACH ROW\nEXECUTE FUNCTION for_portion_of_trigger();\n\n\n", "msg_date": "Fri, 17 Nov 2023 15:12:30 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 11/9/23 05:47, Peter Eisentraut wrote:\n> I went over the patch v17-0001-Add-temporal-PRIMARY-KEY-and-UNIQUE-constraints.patch in more \n> detail.  Attached is a fixup patch that addresses a variety of cosmetic issues.\n\nThanks for the review! This all looks great to me, and it's applied in the attached patch (with one \ntypo correction in a C comment). 
The patch addresses some of jian he's feedback too but I'll reply \nto those emails separately.\n\n> Two areas that could be improved:\n> \n> 1) In src/backend/commands/indexcmds.c, get_index_attr_temporal_operator() has this comment:\n> \n> +    * This seems like a hack\n> +    * but I can't find any existing lookup function\n> +    * that knows about pseudotypes.\n> \n> This doesn't see very confident. ;-)  I don't quite understand this.  Is this a gap in the currently \n> available APIs, do we need to improve something here, or does this need more research?\n\nI've improved this a bit but I'm still concerned about part of it.\n\nFirst the improved part: I realized I should be calling get_opclass_opfamily_and_input_type first \nand passing the opcintype to get_opfamily_member, which solves the problem of having a concrete \nrangetype but needing an operator that targets anyrange. We do the same thing with partition keys.\n\nBut I feel the overall approach is wrong: originally I used hardcoded \"=\" and \"&&\" operators, and \nyou asked me to look them up by strategy number instead. But that leads to trouble with core gist \ntypes vs btree_gist types. The core gist opclasses use RT*StrategyNumbers, but btree_gist creates \nopclasses with BT*StrategyNumbers. I don't see any way to ask ahead of time which class of strategy \nnumbers are used by a given opclass. So I have code like this:\n\n *strat = RTEqualStrategyNumber;\n opname = \"equality\";\n *opid = get_opfamily_member(opfamily, opcintype, opcintype, *strat);\n\n /*\n * For the non-overlaps key elements,\n * try both RTEqualStrategyNumber and BTEqualStrategyNumber.\n * If you're using btree_gist then you'll need the latter.\n */\n if (!OidIsValid(*opid))\n {\n *strat = BTEqualStrategyNumber;\n *opid = get_opfamily_member(opfamily, opcintype, opcintype, *strat);\n }\n\nI do a similar thing for foreign keys.\n\nBut that can't be right. 
I added a scary comment there in this patch, but I'll explain here too:\n\nIt's only by luck that RTEqualStrategyNumber (18) is bigger than any BT*StrategyNumber. If I checked \nin the reverse order, I would always find an operator---it would just sometimes be the wrong one! \nAnd what if someone has defined a new type+opclass with totally different strategy numbers? As far \nas I can tell, the gist AM doesn't require an opclass have any particular operators, only support \nfunctions, so the strategy numbers are \"private\" and can vary between opclasses.\n\nWhat we want is a way to ask which operators mean equality & overlaps for a given opclass. But the \nstrategy numbers aren't meaningful terms to ask the question.\n\nSo I think asking for \"=\" and \"&&\" is actually better here. Those will be correct for both core & \nbtree_gist, and they should also match user expectations for custom types. They are what you'd use \nin a roll-your-own temporal constraint via EXCLUDE. We can also document that we implement WITHOUT \nOVERLAPS with those operator names, so people can get the right behavior from custom types.\n\n(This also maybe lets us implement WITHOUT OVERLAPS for more than rangetypes, as you suggested. See \nbelow for more about that.)\n\nIt's taken me a while to grok the am/opclass/opfamily/amop interaction, and maybe I'm still missing \nsomething here. Let me know if that's the case!\n\n> 2) In src/backend/parser/parse_utilcmd.c, transformIndexConstraint(), there is too much duplication \n> between the normal and the if (constraint->without_overlaps) case, like the whole not-null \n> constraints stuff at the end.  This should be one code block with a few conditionals inside.  Also, \n> the normal case deals with things like table inheritance, which the added parts do not.  Is this all \n> complete?\n\nCleaned things up here. I agree it's much better now.\n\nAnd you're right, now you should be able to use an inherited column in a temporal PK/UQ constraint. 
\nI think I need a lot more test coverage for how this feature combines with inherited tables, so I'll \nwork on that.\n\n> I'm not sure the validateWithoutOverlaps() function is needed at this point in the code.\n\nAgreed, I removed it and moved the is-it-a-rangetype check into the caller.\n\n> We don't even need to \n> restrict this to range types.  Consider for example, it's possible that a type does not have a btree \n> equality operator.  We don't check that here either, but let the index code later check it.\n\nThat is very interesting. Perhaps we allow anything with equals and overlaps then?\n\nNote that we need more for FOR PORTION OF, foreign keys, and foreign keys with CASCADE/SET. So it \nmight be confusing if a type works with temporal PKs but not those other things. But if we \ndocumented what operators you need for each feature then you could implement as much as you liked.\n\nI like this direction a lot. It matches what I suggested in the conversation about multiple WITHOUT \nOVERLAPS/PERIOD columns: rather than having foreign keys and FOR PORTION OF know how to find \nn-dimensional \"leftovers\" we could leave it up the type, and just call a documented operator. (We \nwould need to add that operator for rangetypes btw, one that calls range_leftover_internal. It \nshould return an array (not a multirange!) of the untouched parts of the record.) This makes it easy \nto support bi/tri/n-temporal, spatial, multiranges, etc.\n\n(For spatial you probably want PostGIS instead, and I'm wary of over-abstracting here, but I like \nhow this \"leaves the door open\" for PostGIS to eventually support spatial PKs/FKs.)\n\nPlease let me know what you think!\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Fri, 17 Nov 2023 10:39:58 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Thank you for continuing to review this submission! 
My changes are in\nthe v18 patch I sent a few days ago. Details below.\n\nOn Sun, Oct 29, 2023 at 5:01 PM jian he <jian.universality@gmail.com> wrote:\n> * The attached patch makes foreign keys with PERIOD fail if any of the\n> foreign key columns is \"generated columns\".\n\nI don't see anything like that included in your attachment. I do see\nthe restriction on `ON DELETE SET NULL/DEFAULT (columnlist)`, which I\nincluded. But you are referring to something else I take it? Why do\nyou think FKs should fail if the referred column is GENERATED? Is that\na restriction you think should apply to all FKs or only temporal ones?\n\n> * The following queries will cause segmentation fault. not sure the\n> best way to fix it.\n> . . .\n> CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)\n> REFERENCES temporal3 (id, valid_at)\n> );\n\nFixed, with additional tests re PERIOD on one side but not the other.\n\n> * change the function FindFKComparisonOperators's \"eqstrategy\" to\n> make pg_constraint record correct {conpfeqop,conppeqop,conffeqop}.\n\nThis change is incorrect because it causes foreign keys to fail when\ncreated with btree_gist. See my reply to Peter for more about that. My\nv18 patch also includes some new (very simple) tests in the btree_gist\nextension so it's easier to see whether temporal PKs & FKs work there.\n\n> * fix the ON DELETE SET NULL/DEFAULT (columnlist). 
Now the following\n> queries error will be more consistent.\n> ALTER TABLE temporal_fk_rng2rng DROP CONSTRAINT temporal_fk_rng2rng_fk,\n> ADD CONSTRAINT temporal_fk_rng2rng_fk\n> FOREIGN KEY (parent_id, PERIOD valid_at) REFERENCES temporal_rng\n> ON DELETE SET DEFAULT(valid_at);\n> --ON DELETE SET NULL(valid_at);\n\nOkay, thanks!\n\n> * refactor restrict_cascading_range function.\n\nIt looks like your attachment only renames the column, but I think\n\"restrict\" is more expressive and accurate than \"get\", so I'd like to\nkeep the original name here.\n\n> * you did if (numfks != numpks) before if (is_temporal) {numfks +=\n> 1;}, So I changed the code order to make the error report more\n> consistent.\n\nSince we do numfks +=1 and numpks +=1, I don't see any inconsistency\nhere. Also you are making things now happen before a permissions\ncheck, which may be important (I'm not sure). Can you explain what\nimprovement is intended here? Your changes don't seem to cause any\nchanges in the tests, so what is the goal? Perhaps I'm\nmisunderstanding what you mean by \"more consistent.\"\n\nThanks! 
I'll reply to your Nov 6 email separately.\n\nYours,\n\n--\nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Sat, 18 Nov 2023 21:24:09 -0800", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Mon, Nov 6, 2023 at 11:07 PM jian he <jian.universality@gmail.com> wrote:\n> + </para>\n> + In a temporal foreign key, the delete/update will use\n> + <literal>FOR PORTION OF</literal> semantics to constrain the\n> + effect to the bounds being deleted/updated in the referenced row.\n> + </para>\n>\n> The first \"para\" should be <para> ?\n\nThanks, fixed (in v18)!\n\n> There are many warnings after #define WRITE_READ_PARSE_PLAN_TREES\n> see: http://cfbot.cputube.org/highlights/all.html#4308\n> Does that mean oue new change in gram.y is somehow wrong?\n\nFixed (in read+out node funcs).\n\n> sgml/html/sql-update.html:\n> \"range_or_period_name\n> The range column or period to use when performing a temporal update.\n> This must match the range or period used in the table's temporal\n> primary key.\"\n>\n> Is the second sentence unnecessary? since no primary can still do \"for\n> portion of update\".\n\nYou're right, this dates back to an older version of the patch. Removed.\n\n> sgml/html/sql-update.html:\n> \"start_time\n> The earliest time (inclusive) to change in a temporal update. This\n> must be a value matching the base type of the range or period from\n> range_or_period_name. It may also be the special value MINVALUE to\n> indicate an update whose beginning is unbounded.\"\n>\n> probably something like the following:\n> \"lower_bound\"\n> The lower bound (inclusive) to change in an overlap update. This must\n> be a value matching the base type of the range or period from\n> range_or_period_name. 
It may also be the special value UNBOUNDED to\n> indicate an update whose beginning is unbounded.\"\n>\n> Obviously the \"start_time\" reference also needs to change, and the\n> sql-delete.html reference also needs to change.\n\nSee below re UNBOUNDED....\n\n> UPDATE for_portion_of_test FOR PORTION OF valid_at FROM NULL TO\n> \"unbounded\" SET name = 'NULL to NULL';\n> should fail, but not. double quoted unbounded is a column reference, I assume.\n>\n> That's why I am confused with the function transformForPortionOfBound.\n> \"if (nodeTag(n) == T_ColumnRef)\" part.\n\nYou're right, using a ColumnDef was probably not good here, and\ntreating `\"UNBOUNDED\"` (with quotes from the user) as a keyword is no\ngood. I couldn't find a way to make this work without reduce/reduce\nconflicts, so I just took it out. It was syntactic sugar for `FROM/TO\nNULL` and not part of the standard, so it's not too important. Also I\nsee that UNBOUNDED causes difficult problems already with window\nfunctions (comments in gram.y). I hope I can find a way to make this\nwork eventually, but it can go for now.\n\n> in create_table.sgml. you also need to add WITHOUT OVERLAPS related\n> info into <varlistentry id=\"sql-createtable-parms-unique\">\n\nYou're right, fixed (though Peter's patch then changed this same spot).\n\nThanks,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Sat, 18 Nov 2023 21:32:52 -0800", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sun, Nov 19, 2023 at 1:24 PM Paul A Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> Thank you for continuing to review this submission! My changes are in\n> the v18 patch I sent a few days ago. 
Details below.\n>\n> On Sun, Oct 29, 2023 at 5:01 PM jian he <jian.universality@gmail.com> wrote:\n> > * The attached patch makes foreign keys with PERIOD fail if any of the\n> > foreign key columns is \"generated columns\".\n>\n> I don't see anything like that included in your attachment. I do see\n> the restriction on `ON DELETE SET NULL/DEFAULT (columnlist)`, which I\n> included. But you are referring to something else I take it? Why do\n> you think FKs should fail if the referred column is GENERATED? Is that\n> a restriction you think should apply to all FKs or only temporal ones?\n>\n\nI believe the following part should fail. Similar tests on\nsrc/test/regress/sql/generated.sql. line begin 347.\n\ndrop table if exists gtest23a,gtest23x cascade;\nCREATE TABLE gtest23a (x int4range, y int4range,\nCONSTRAINT gtest23a_pk PRIMARY KEY (x, y WITHOUT OVERLAPS));\nCREATE TABLE gtest23x (a int4range, b int4range GENERATED ALWAYS AS\n('empty') STORED,\nFOREIGN KEY (a, PERIOD b ) REFERENCES gtest23a(x, PERIOD y) ON UPDATE\nCASCADE); -- should be error?\n-------\n\n>\n> > * you did if (numfks != numpks) before if (is_temporal) {numfks +=\n> > 1;}, So I changed the code order to make the error report more\n> > consistent.\n>\n> Since we do numfks +=1 and numpks +=1, I don't see any inconsistency\n> here. Also you are making things now happen before a permissions\n> check, which may be important (I'm not sure). Can you explain what\n> improvement is intended here? Your changes don't seem to cause any\n> changes in the tests, so what is the goal? 
Perhaps I'm\n> misunderstanding what you mean by \"more consistent.\"\n>\n\nbegin;\ndrop table if exists fk, pk cascade;\nCREATE TABLE pk (id int4range, valid_at int4range,\nCONSTRAINT pk_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)\n);\nCREATE TABLE fk (\nid int4range,valid_at tsrange, parent_id int4range,\nCONSTRAINT fk FOREIGN KEY (parent_id, valid_at)\n REFERENCES pk\n);\nrollback;\n--\nthe above query will return an error: number of referencing and\nreferenced columns for foreign key disagree.\nbut if you look at it closely, primary key and foreign key columns both are two!\nThe error should be saying valid_at should be specified with \"PERIOD\".\n\nbegin;\ndrop table if exists fk, pk cascade;\nCREATE TABLE pk (id int4range, valid_at int4range,\nCONSTRAINT pk_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)\n);\nCREATE TABLE fk (\nid int4range,valid_at int4range, parent_id int4range,\nCONSTRAINT fk FOREIGN KEY (parent_id, period valid_at)\nREFERENCES pk\n);\nselect conname,array_length(conkey,1),array_length(confkey,1)\nfrom pg_constraint where conname = 'fk';\nrollback;\n------------\nI found out other issues in v18.\nI first do `git apply` then `git diff --check`, there is a white\nspace error in v18-0005.\n\nYou also need to change update.sgml and delete.sgml <title>Outputs</title> part.\nSince at most, it can return 'UPDATE 3' or 'DELETE 3'.\n\n--the following query should work?\ndrop table pk;\nCREATE table pk(a numrange PRIMARY key,b text);\ninsert into pk values('[1,10]');\ncreate or replace function demo1() returns void as $$\ndeclare lb numeric default 1; up numeric default 3;\nbegin\n update pk for portion of a from lb to up set b = 'lb_to_up';\n return;\nend\n$$ language plpgsql;\nselect * from demo1();\n\n\n", "msg_date": "Mon, 20 Nov 2023 14:57:37 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 17.11.23 19:39, Paul Jungwirth wrote:\n> But I 
feel the overall approach is wrong: originally I used hardcoded \n> \"=\" and \"&&\" operators, and you asked me to look them up by strategy \n> number instead. But that leads to trouble with core gist types vs \n> btree_gist types. The core gist opclasses use RT*StrategyNumbers, but \n> btree_gist creates opclasses with BT*StrategyNumbers.\n\nOuch.\n\nThat also provides the answer to my question #2 here: \nhttps://www.postgresql.org/message-id/6f010a6e-8e20-658b-dc05-dc9033a694da%40eisentraut.org\n\nI don't have a good idea about this right now. Could we just change \nbtree_gist perhaps? Do we need a new API for this somewhere?\n\n\n\n\n", "msg_date": "Mon, 20 Nov 2023 08:58:55 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 20.11.23 08:58, Peter Eisentraut wrote:\n> On 17.11.23 19:39, Paul Jungwirth wrote:\n>> But I feel the overall approach is wrong: originally I used hardcoded \n>> \"=\" and \"&&\" operators, and you asked me to look them up by strategy \n>> number instead. But that leads to trouble with core gist types vs \n>> btree_gist types. The core gist opclasses use RT*StrategyNumbers, but \n>> btree_gist creates opclasses with BT*StrategyNumbers.\n> \n> Ouch.\n> \n> That also provides the answer to my question #2 here: \n> https://www.postgresql.org/message-id/6f010a6e-8e20-658b-dc05-dc9033a694da%40eisentraut.org\n> \n> I don't have a good idea about this right now.  Could we just change \n> btree_gist perhaps?  Do we need a new API for this somewhere?\n\nAfter further thought, I think the right solution is to change \nbtree_gist (and probably also btree_gin) to use the common RT* strategy \nnumbers. The strategy numbers are the right interface to determine the \nsemantics of index AM operators. 
It's just that until now, nothing \nexternal has needed this information from gist indexes (unlike btree, \nhash), so it has been a free-for-all.\n\nI don't see an ALTER OPERATOR CLASS command that could be used to \nimplement this. Maybe we could get away with a direct catalog UPDATE. \nOr we need to make some DDL for this.\n\nAlternatively, this could be the time to reconsider moving this into core.\n\n\n", "msg_date": "Thu, 23 Nov 2023 10:08:54 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Thank you again for such thorough reviews!\n\nOn Thu, Nov 16, 2023 at 11:12 PM jian he <jian.universality@gmail.com> wrote:\n > UPDATE FOR PORTION OF, may need insert privilege. We also need to document this.\n > Similarly, we also need to apply the above logic to DELETE FOR PORTION OF.\n\nI don't think UPDATE/DELETE FOR PORTION OF is supposed to require INSERT permission.\n\nNotionally the INSERTs are just to preserve what was there already, not to add new data.\nThe idea is that a temporal table is equivalent to a table with one row for every \"instant\",\ni.e. one row per microsecond/second/day/whatever-time-resolution. Of course that would be too slow,\nso we use PERIODs/ranges instead, but the behavior should be the same. Date's book has a good \ndiscussion of this idea.\n\nI also checked the SQL:2011 draft standard, and there is a section called Access Rules in Part 2: \nSQL/Foundation for UPDATE and DELETE statements. Those sections say you need UPDATE/DELETE \nprivileges, but say nothing about needing INSERT privileges. That is on page 949 and 972 of the PDFs \nfrom the \"SQL:20nn Working Draft Documents\" link at [1]. If someone has a copy of SQL:2016 maybe \nsomething was changed, but I would be surprised.\n\nI also checked MariaDB and IBM DB2, the only two RDBMSes that implement FOR PORTION OF to my \nknowledge. (It is not in Oracle or MSSQL.) 
I created a table with one row, then gave another user \nprivileges to SELECT & UPDATE, but not INSERT. In both cases, that user could execute an UPDATE FOR \nPORTION OF that resulted in new rows, but could not INSERT genuinely new rows. [2,3]\n\nSo instead of changing this I've updated the documentation to make it explicit that you do not need \nINSERT privilege to use FOR PORTION OF. I also documented which triggers will fire and in which order.\n\n > + <para>\n > + If the table has a <link\n > linkend=\"ddl-periods-application-periods\">range column\n > + or <literal>PERIOD</literal></link>, you may supply a\n >\n > should be\n >\n > + <para>\n > + If the table has a range column or <link\n > linkend=\"ddl-periods-application-periods\">\n > + <literal>PERIOD</literal></link>, you may supply a\n >\n > similarly the doc/src/sgml/ref/delete.sgml the link reference also broken.\n\nOkay, changed.\n\n > \"given interval\", \"cut off\" these words, imho, feel not so clear.\n > We also need a document that:\n > \"UPDATE FOR PORTION OF\" is UPDATE and INSERT (if overlaps).\n > If the \"UPDATE FOR PORTION OF\" range overlaps then\n > It will invoke triggers in the following order: before row update,\n > before row insert, after row insert. 
after row update.\n\nOkay, reworked the docs for this.\n\n > src/test/regress/sql/for_portion_of.sql\n > You only need to create two triggers?\n > since for_portion_of_trigger only raises notice to output the triggers\n > meta info.\n\nChanged.\n\nv19 patch series attached, rebased to a11c9c42ea.\n\n\n\n[1] https://web.archive.org/web/20230923221106/https://www.wiscorp.com/SQLStandards.html\n\n[2] MariaDB test:\n\nFirst create a table as the root user:\n\n```\ncreate table t (id int, ds date, de date, name text, period for valid_at (ds, de));\ninsert into t values (1, '2000-01-01', '2001-01-01', 'foo');\n```\n\nand give another user select & update privlege (but not insert):\n\n```\ncreate database paul;\nuse paul;\ncreate user 'update_only'@'localhost' identified by 'test';\ngrant select, update on paul.t to 'update_only'@'localhost';\nflush privileges;\n```\n\nNow as that user:\n\n```\nmysql -uupdate_only -p\nuse paul;\n-- We can update the whole record:\nupdate t for portion of valid_at from '2000-01-01' to '2001-01-01' set name = 'bar';\n-- We can update a part of the record:\nupdate t for portion of valid_at from '2000-01-01' to '2000-07-01' set name = 'baz';\nselect * from t;\n+------+------------+------------+------+\n| id | ds | de | name |\n+------+------------+------------+------+\n| 1 | 2000-01-01 | 2000-07-01 | baz |\n| 1 | 2000-07-01 | 2001-01-01 | bar |\n+------+------------+------------+------+\n-- We cannot insert:\ninsert into t values (2, '2000-01-01', '2001-01-01' 'another');\nERROR 1142 (42000): INSERT command denied to user 'update_only'@'localhost' for table `paul`.`t`\n```\n\n[3] IBM DB2 test:\n\n```\nmkdir ~/local/db2\ncd ~/local/db2\ntar xzvf ~/Downloads/v11.5.9_linuxx64_server_dec.tar.gz\ncd server_dev\n./db2_install # should put something at ~/sqllib\nsource ~/sqllib/db2profile\ndb2start # but I got \"The database manager is already active.\"\ndb2\ncreate database paul -- first time only, note no semicolon\nconnect to paul\ncreate table t 
(id integer, ds date not null, de date not null, name varchar(4000), period \nbusiness_time (ds, de));\ninsert into t values (1, '2000-01-01', '2001-01-01', 'foo');\ngrant connect on database to user james;\ngrant select, update on t to user james;\n```\n\nNow as james:\n\n```\nsource ~paul/sqllib/db2profile\ndb2\nconnect to paul\nselect * from paul.t;\nupdate paul.t for portion of business_time from '2000-01-01' to '2000-06-01' set name = 'bar';\nDB20000I The SQL command completed successfully.\nselect * from paul.t;\ninsert into paul.t values (2, '2000-01-01', '2001-01-01', 'bar');\nDB21034E The command was processed as an SQL statement because it was not a\nvalid Command Line Processor command. During SQL processing it returned:\nSQL0551N The statement failed because the authorization ID does not have the\nrequired authorization or privilege to perform the operation. Authorization\nID: \"JAMES\". Operation: \"INSERT\". Object: \"PAUL.T\". SQLSTATE=42501\n```\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Sat, 2 Dec 2023 10:11:52 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, Nov 23, 2023 at 1:08 AM Peter Eisentraut <peter@eisentraut.org> wrote:\n > After further thought, I think the right solution is to change\n > btree_gist (and probably also btree_gin) to use the common RT* strategy\n > numbers.\n\nOkay. That will mean bumping the version of btree_gist, and you must be running that version to use \nthe new temporal features, or you will get silly results. Right? Is there a way to protect users \nagainst that and communicate they need to upgrade the extension?\n\nThis also means temporal features may not work in custom GIST opclasses. What we're saying is they \nmust have an appropriate operator for RTEqualStrategyNumber (18) and RTOverlapStrategyNumber (3). 
\nEqual matters for the scalar key part(s), overlap for the range part. So equal is more likely to be \nan issue, but overlap matters if we want to support non-ranges (which I'd say is worth doing).\n\nAlso if they get it wrong, we won't really have any way to report an error.\n\nI did some research on other extensions in contrib, as well as PostGIS. Here is what I found:\n\n## btree_gin:\n\n3 is =\n18 is undefined\n\nsame for all types: macaddr8, int2, int4, int8, float4, float8, oid, timestamp, timestamptz, time, \ntimetz, date, interval, inet, cidr, text, varchar, char, bytea, bit, varbit, numeric, anyenum, uuid, \nname, bool, bpchar\n\n## cube\n\n3 is &&\n18 is <=>\n\n## intarray\n\n3 is &&\n18 is undefined\n\n## ltree\n\n3 is =\n18 is undefined\n\n## hstore\n\n3 and 18 are undefined\n\n## seg\n\n3 is &&\n18 is undefined\n\n## postgis: geometry\n\n3 is &&\n18 is undefined\n\n## postgis: geometry_nd\n\n3 is &&&\n18 is undefined\n\nI thought about looking through pgxn for more, but I haven't yet. I may still do that.\nBut already it seems like there is not much consistency.\n\nSo what do you think of this idea instead?:\n\nWe could add a new (optional) support function to GiST that translates \"well-known\" strategy numbers \ninto the opclass's own strategy numbers. This would be support function 12. 
Then we can say \ntranslateStrategyNumber(RTEqualStrategyNumber) and look up the operator with the result.\n\nThere is not a performance hit, because we do this for the DDL command (create pk/uq/fk), then store \nthe operator in the index/constraint.\n\nIf you don't provide this new support function, then creating the pk/uq/fk fails with a hint about \nwhat you can do to make it work.\n\nThis approach means we don't change the rules about GiST opclasses: you can still use the stranums \nhow you like.\n\nThis function would also let me support non-range \"temporal\" foreign keys, where I'll need to build \nqueries with && and maybe other operators.\n\nWhat do you think?\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Sat, 2 Dec 2023 10:41:01 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 12/2/23 19:11, Paul Jungwirth wrote:\n> Thank you again for such thorough reviews!\n> \n> On Thu, Nov 16, 2023 at 11:12 PM jian he <jian.universality@gmail.com> \n> wrote:\n> > UPDATE FOR PORTION OF, may need insert privilege. We also need to \n> document this.\n> > Similarly, we also need to apply the above logic to DELETE FOR \n> PORTION OF.\n> \n> I don't think UPDATE/DELETE FOR PORTION OF is supposed to require INSERT \n> permission.\n> \n> Notionally the INSERTs are just to preserve what was there already, not \n> to add new data.\n> The idea is that a temporal table is equivalent to a table with one row \n> for every \"instant\",\n> i.e. one row per microsecond/second/day/whatever-time-resolution. Of \n> course that would be too slow,\n> so we use PERIODs/ranges instead, but the behavior should be the same. \n> Date's book has a good discussion of this idea.\n> \n> I also checked the SQL:2011 draft standard, and there is a section \n> called Access Rules in Part 2: SQL/Foundation for UPDATE and DELETE \n> statements. 
Those sections say you need UPDATE/DELETE privileges, but \n> say nothing about needing INSERT privileges. That is on page 949 and 972 \n> of the PDFs from the \"SQL:20nn Working Draft Documents\" link at [1]. If \n> someone has a copy of SQL:2016 maybe something was changed, but I would \n> be surprised\n\nNothing has changed here in SQL:2023 (or since).\n-- \nVik Fearing\n\n\n\n", "msg_date": "Sun, 3 Dec 2023 11:13:57 +0100", "msg_from": "Vik Fearing <vik@postgresfriends.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 02.12.23 19:41, Paul Jungwirth wrote:\n> So what do you think of this idea instead?:\n> \n> We could add a new (optional) support function to GiST that translates \n> \"well-known\" strategy numbers into the opclass's own strategy numbers. \n> This would be support function 12. Then we can say \n> translateStrategyNumber(RTEqualStrategyNumber) and look up the operator \n> with the result.\n> \n> There is not a performance hit, because we do this for the DDL command \n> (create pk/uq/fk), then store the operator in the index/constraint.\n> \n> If you don't provide this new support function, then creating the \n> pk/uq/fk fails with a hint about what you can do to make it work.\n> \n> This approach means we don't change the rules about GiST opclasses: you \n> can still use the stranums how you like.\n> \n> This function would also let me support non-range \"temporal\" foreign \n> keys, where I'll need to build queries with && and maybe other operators.\n\nI had some conversations about this behind the scenes. I think this \nidea makes sense.\n\nThe other idea was that we create new strategy numbers, like \nTemporalEqualsStrategy / TemporalOverlapsStrategy. But then you'd have \nthe situation where some strategy numbers are reserved and others are \nnot, so perhaps that is not so clean. 
I think your idea is good.\n\n\n\n", "msg_date": "Wed, 6 Dec 2023 09:59:33 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sun, Dec 3, 2023 at 2:11 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> v19 patch series attached, rebased to a11c9c42ea.\n>\n\nthis TODO:\n * TODO: It sounds like FOR PORTION OF might need to do something here too?\nbased on comments on ExprContext. I refactor a bit, and solved this TODO.\n\ntring to the following TODO:\n// TODO: Need to save context->mtstate->mt_transition_capture? (See\ncomment on ExecInsert)\n\nbut failed.\nI also attached the trial, and also added the related test.\n\nYou can also use the test to check portion update with insert trigger\nwith \"referencing old table as old_table new table as new_table\"\nsituation.", "msg_date": "Wed, 6 Dec 2023 21:22:07 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "hi. 
some small issues....\n\ndiff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c\nindex e3ccf6c7f7..6781e55020 100644\n--- a/src/backend/tcop/utility.c\n+++ b/src/backend/tcop/utility.c\n@@ -1560,7 +1560,7 @@ ProcessUtilitySlow(ParseState *pstate,\n true, /* check_rights */\n true, /* check_not_in_use */\n false, /* skip_build */\n- false); /* quiet */\n+ false); /* quiet */\n\nIs the above part unnecessary?\n\ndiff --git a/src/backend/utils/adt/Makefile b/src/backend/utils/adt/Makefile\nindex 199eae525d..d04c75b398 100644\n--- a/src/backend/utils/adt/Makefile\n+++ b/src/backend/utils/adt/Makefile\n@@ -78,6 +78,7 @@ OBJS = \\\n oracle_compat.o \\\n orderedsetaggs.o \\\n partitionfuncs.o \\\n+ period.o \\\n pg_locale.o \\\n pg_lsn.o \\\n pg_upgrade_support.o \\\ndiff --git a/src/backend/utils/adt/period.c b/src/backend/utils/adt/period.c\nnew file mode 100644\nindex 0000000000..0ed4304e16\n--- /dev/null\n+++ b/src/backend/utils/adt/period.c\n@@ -0,0 +1,56 @@\n+/*-------------------------------------------------------------------------\n+ *\n+ * period.c\n+ * Functions to support periods.\n+ *\n+ *\n+ * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group\n+ * Portions Copyright (c) 1994, Regents of the University of California\n+ *\n+ *\n+ * IDENTIFICATION\n+ * src/backend/utils/adt/period.c\n+ *\n+ *-------------------------------------------------------------------------\n+ */\n+#include \"postgres.h\"\n+\n+#include \"executor/tuptable.h\"\n+#include \"fmgr.h\"\n+#include \"nodes/primnodes.h\"\n+#include \"utils/fmgrprotos.h\"\n+#include \"utils/period.h\"\n+#include \"utils/rangetypes.h\"\n+\n+Datum period_to_range(TupleTableSlot *slot, int startattno, int\nendattno, Oid rangetype)\n+{\n+ Datum startvalue;\n+ Datum endvalue;\n+ Datum result;\n+ bool startisnull;\n+ bool endisnull;\n+ LOCAL_FCINFO(fcinfo, 2);\n+ FmgrInfo flinfo;\n+ FuncExpr *f;\n+\n+ InitFunctionCallInfoData(*fcinfo, &flinfo, 2, InvalidOid, NULL, NULL);\n+ f = 
makeNode(FuncExpr);\n+ f->funcresulttype = rangetype;\n+ flinfo.fn_expr = (Node *) f;\n+ flinfo.fn_extra = NULL;\n+\n+ /* compute oldvalue */\n+ startvalue = slot_getattr(slot, startattno, &startisnull);\n+ endvalue = slot_getattr(slot, endattno, &endisnull);\n+\n+ fcinfo->args[0].value = startvalue;\n+ fcinfo->args[0].isnull = startisnull;\n+ fcinfo->args[1].value = endvalue;\n+ fcinfo->args[1].isnull = endisnull;\n+\n+ result = range_constructor2(fcinfo);\n+ if (fcinfo->isnull)\n+ elog(ERROR, \"function %u returned NULL\", flinfo.fn_oid);\n+\n+ return result;\n+}\n\nI am confused. so now I only apply v19, 0001 to 0003.\nperiod_to_range function never used. maybe we can move this part to\n0005-Add PERIODs.patch?\nAlso you add change in Makefile in 0003, meson.build change in 0005,\nbetter put it on in 0005?\n\ndiff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y\nindex 5b110ca7fe..d54d84adf6 100644\n--- a/src/backend/parser/gram.y\n+++ b/src/backend/parser/gram.y\n\n+/*\n+ * We need to handle this shift/reduce conflict:\n+ * FOR PORTION OF valid_at FROM INTERVAL YEAR TO MONTH TO foo.\n+ * This is basically the classic \"dangling else\" problem, and we want a\n+ * similar resolution: treat the TO as part of the INTERVAL, not as part of\n+ * the FROM ... TO .... 
Users can add parentheses if that's a problem.\n+ * TO just needs to be higher precedence than YEAR_P etc.\n+ * TODO: I need to figure out a %prec solution before this gets committed!\n+ */\n+%nonassoc YEAR_P MONTH_P DAY_P HOUR_P MINUTE_P\n+%nonassoc TO\n\nthis part will never happen?\nsince \"FROM INTERVAL YEAR TO MONTH TO\"\nmeans \"valid_at\" will be interval range data type, which does not exist now.\n\n ri_PerformCheck(riinfo, &qkey, qplan,\n fk_rel, pk_rel,\n oldslot, NULL,\n+ targetRangeParam, targetRange,\n true, /* must detect new rows */\n SPI_OK_SELECT);\n\n@@ -905,6 +922,7 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)\n ri_PerformCheck(riinfo, &qkey, qplan,\n fk_rel, pk_rel,\n oldslot, NULL,\n+ -1, 0,\n true, /* must detect new rows */\n SPI_OK_DELETE);\n\n@@ -1026,6 +1044,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)\n ri_PerformCheck(riinfo, &qkey, qplan,\n fk_rel, pk_rel,\n oldslot, newslot,\n+ -1, 0,\n true, /* must detect new rows */\n SPI_OK_UPDATE);\n\n@@ -1258,6 +1277,7 @@ ri_set(TriggerData *trigdata, bool is_set_null,\nint tgkind)\n ri_PerformCheck(riinfo, &qkey, qplan,\n fk_rel, pk_rel,\n oldslot, NULL,\n+ -1, 0,\n true, /* must detect new rows */\n SPI_OK_UPDATE);\n\n@@ -2520,6 +2540,7 @@ ri_PerformCheck(const RI_ConstraintInfo *riinfo,\n RI_QueryKey *qkey, SPIPlanPtr qplan,\n Relation fk_rel, Relation pk_rel,\n TupleTableSlot *oldslot, TupleTableSlot *newslot,\n+ int forPortionOfParam, Datum forPortionOf,\n bool detectNewRows, int expect_OK)\n\nfor all the refactor related to ri_PerformCheck, do you need (Datum) 0\ninstead of plain 0?\n\n+ <para>\n+ If the table has a range column or\n+ <link linkend=\"ddl-periods-application-periods\"><literal>PERIOD</literal></link>,\n+ you may supply a <literal>FOR PORTION OF</literal> clause, and\nyour delete will\n+ only affect rows that overlap the given interval. 
Furthermore, if\na row's span\n\nhttps://influentialpoints.com/Training/basic_statistics_ranges.htm#:~:text=A%20range%20is%20two%20numbers,or%20the%20difference%20between%20them\nSo \"range\" is more accurate than \"interval\"?\n\n+/* ----------\n+ * ForPortionOfState()\n+ *\n+ * Copies a ForPortionOfState into the current memory context.\n+ */\n+static ForPortionOfState *\n+CopyForPortionOfState(ForPortionOfState *src)\n+{\n+ ForPortionOfState *dst = NULL;\n+ if (src) {\n+ MemoryContext oldctx;\n+ RangeType *r;\n+ TypeCacheEntry *typcache;\n+\n+ /*\n+ * Need to lift the FOR PORTION OF details into a higher memory context\n+ * because cascading foreign key update/deletes can cause triggers to fire\n+ * triggers, and the AfterTriggerEvents will outlive the FPO\n+ * details of the original query.\n+ */\n+ oldctx = MemoryContextSwitchTo(TopTransactionContext);\n\nshould it be \"Copy a ForPortionOfState into the TopTransactionContext\"?\n\n\n", "msg_date": "Mon, 11 Dec 2023 16:31:24 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, Dec 6, 2023 at 12:59 AM Peter Eisentraut <peter@eisentraut.org> wrote:\n >\n > On 02.12.23 19:41, Paul Jungwirth wrote:\n > > So what do you think of this idea instead?:\n > >\n > > We could add a new (optional) support function to GiST that translates\n > > \"well-known\" strategy numbers into the opclass's own strategy numbers.\n >\n > I had some conversations about this behind the scenes.  I think this\n > idea makes sense.\n\nHere is a patch series with the GiST stratnum support function added. I put this into a separate \npatch (before all the temporal ones), so it's easier to review. Then in the PK patch (now #2) we \ncall that function to figure out the = and && operators. I think this is a big improvement.\n\nI provide a couple \"example\" implementations:\n\n- An identity function that returns whatever you give it. 
The core gist opclasses use this since \nthey use the RT* constants. Even though not all opclasses support all strategies, it is okay to \nreturn a stratnum with no amop entry. You will just get an error when you try to make a temporal PK \nwith that type as the WITHOUT OVERLAPS part (which is natural for the types we're talking about).\n\n- A function that translates RT*StrategyNumbers to BT*StrategyNumbers when possible (just \n=/</<=/>/>=, and we really only need =). This is what the btree_gist opclasses use. (No other \nRT*StrategyNumber can be translated, which means you can only use these types for the non-WIHOUT \nOVERLAPS part, but again that is natural.)\n\nI didn't add a similar function to GIN. It's not possible to use GIN for temporal PKs, so I don't \nthink it makes sense.\n\n\n## Foreign Keys\n\nFor FKs, I need a couple similar things:\n\n- The ContainedBy operator (<@ for rangetypes).\n- An aggregate function to combine referenced rows (instead of hardcoding range_agg as before).\n\nI look up ContainedBy just as I'm doing with Equal & Overlap for PKs. The aggregate function is \nanother optional support function.\n\nI broke out that support function into another independent patch here. Then I updated by FKs patch \nto use it (and the ContainedBy operator).\n\n\n## FOR PORTION OF\n\nThen for FOR PORTION OF I need an intersect operator (*) and a new \"leftovers\" operator.\n\nWe have an intersect operator (for range & multirange at least), but no strategy number for it, thus \nno amop entry. My patch adds that, **but** it is neither a search operator ('s') nor ordering ('o'), \nso I've added a \"portion\" option ('p'). I'm not sure this is completely valid, since `FOR PORTION \nOF` is not really an *index* operation, but it does seem index-adjacent: you would only/usually use \nit on something with a temporal PK (which is an index). And it is an analogous situation, where \npg_amop entries tell us how to implement the extensible parts. 
But if all this seems like the wrong \napproach please let me know.\n\nThe new leftovers operator similarly has 'p' for amoppurpose and another amop entry.\n\nThe leftovers operator returns an array of T, where T is the type of the valid_at column. Then we'll \ninsert a new \"leftovers\" row for each array entry. So we aren't assuming only \"before\" and \"after\" \n(which doesn't work for multiranges or two-dimensional spaces as you'd have with bitemporal or spatial).\n\nBut now that \"leftovers\" are becoming more of an external-facing part of Postgres, I feel we should \nhave a less silly name. (That's too bad, because \"leftovers\" is memorable and non-ambiguous, and \ncomputer pioneers used all kinds of silly names, so if you tell me I don't have to be quite so \nprofessional, maybe I'll go back to it.) I considered things like \"without\" or \"multi-subtract\" or \n\"except\". I went with \"without portion\", which is nice because it evokes FOR PORTION OF and doesn't \nlimit the scope to rangetypes.\n\nFor the symbol I like `@-`. It conveys the similarity to subtraction, and \"@\" can be a mnemonic for \n\"array\". (Too bad we can't use `--`, ha ha.) I also thought about `@-@`, but that is used already by \npath_length and lseg_length, and maybe a non-commutative operator deserves a non-palindromic name.\n\nThe {multi,}range_without_portion procs & operators are broken out into a separate commit, and the \nFPO patch now uses them in the exec node. It always made me a little uneasy to have rangetype code \nin nodeModifyTable.c, and now it's gone.\n\nThen the last thing I need for FOR PORTION OF is a \"constructor\". In SQL:2011 you use `FOR PORTION \nOF valid_at FROM '2000-01-01' TO '2010-01-01'`. But FROM and TO don't really work for non-range \ntypes. So I added an alternate syntax that is `FOR PORTION OF valid_at \n(tsmultirange(tsrange('2001-01-01', '2002-02-02'), tsrange('2003-03-03', '2004-04-04')))`. 
In other \nwords parens wrapping a value of the type you're using. I still support FROM & TO for building a \nrange type, so we follow the standard.\n\nThat's it for now. Multiranges should be fully supported (but need lots more tests), as well as \ncustom types. I've updated some of the docs, but I need to go through them and clarify where things \ndon't necessarily have to be ranges.\n\nRebased to cb44a8345e.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Sun, 31 Dec 2023 00:51:49 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 12/31/23 00:51, Paul Jungwirth wrote:\n> That's it for now.\n\nHere is another update. I fixed FOR PORTION OF on partitioned tables, in particular when the attnums \nare different from the root partition.\n\nRebased to cea89c93a1.\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Mon, 1 Jan 2024 17:59:47 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, Jan 2, 2024 at 9:59 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 12/31/23 00:51, Paul Jungwirth wrote:\n> > That's it for now.\n>\n> Here is another update. I fixed FOR PORTION OF on partitioned tables, in particular when the attnums\n> are different from the root partition.\n>\n> Rebased to cea89c93a1.\n>\n\nHi.\n\n+/*\n+ * range_without_portion_internal - Sets outputs and outputn to the ranges\n+ * remaining and their count (respectively) after subtracting r2 from r1.\n+ * The array should never contain empty ranges.\n+ * The outputs will be ordered. 
We expect that outputs is an array of\n+ * RangeType pointers, already allocated with two slots.\n+ */\n+void\n+range_without_portion_internal(TypeCacheEntry *typcache, RangeType *r1,\n+ RangeType *r2, RangeType **outputs, int *outputn)\n+{\n+ int cmp_l1l2,\n+ cmp_l1u2,\n+ cmp_u1l2,\n+ cmp_u1u2;\n+ RangeBound lower1,\n+ lower2;\n+ RangeBound upper1,\n+ upper2;\n+ bool empty1,\n+ empty2;\n+\n+ range_deserialize(typcache, r1, &lower1, &upper1, &empty1);\n+ range_deserialize(typcache, r2, &lower2, &upper2, &empty2);\n+\n+ if (empty1)\n+ {\n+ /* if r1 is empty then r1 - r2 is empty, so return zero results */\n+ *outputn = 0;\n+ return;\n+ }\n+ else if (empty2)\n+ {\n+ /* r2 is empty so the result is just r1 (which we know is not empty) */\n+ outputs[0] = r1;\n+ *outputn = 1;\n+ return;\n+ }\n+\n+ /*\n+ * Use the same logic as range_minus_internal,\n+ * but support the split case\n+ */\n+ cmp_l1l2 = range_cmp_bounds(typcache, &lower1, &lower2);\n+ cmp_l1u2 = range_cmp_bounds(typcache, &lower1, &upper2);\n+ cmp_u1l2 = range_cmp_bounds(typcache, &upper1, &lower2);\n+ cmp_u1u2 = range_cmp_bounds(typcache, &upper1, &upper2);\n+\n+ if (cmp_l1l2 < 0 && cmp_u1u2 > 0)\n+ {\n+ lower2.inclusive = !lower2.inclusive;\n+ lower2.lower = false; /* it will become the upper bound */\n+ outputs[0] = make_range(typcache, &lower1, &lower2, false, NULL);\n+\n+ upper2.inclusive = !upper2.inclusive;\n+ upper2.lower = true; /* it will become the lower bound */\n+ outputs[1] = make_range(typcache, &upper2, &upper1, false, NULL);\n+\n+ *outputn = 2;\n+ }\n+ else if (cmp_l1u2 > 0 || cmp_u1l2 < 0)\n+ {\n+ outputs[0] = r1;\n+ *outputn = 1;\n+ }\n+ else if (cmp_l1l2 >= 0 && cmp_u1u2 <= 0)\n+ {\n+ *outputn = 0;\n+ }\n+ else if (cmp_l1l2 <= 0 && cmp_u1l2 >= 0 && cmp_u1u2 <= 0)\n+ {\n+ lower2.inclusive = !lower2.inclusive;\n+ lower2.lower = false; /* it will become the upper bound */\n+ outputs[0] = make_range(typcache, &lower1, &lower2, false, NULL);\n+ *outputn = 1;\n+ }\n+ else if (cmp_l1l2 >= 0 && 
cmp_u1u2 >= 0 && cmp_l1u2 <= 0)\n+ {\n+ upper2.inclusive = !upper2.inclusive;\n+ upper2.lower = true; /* it will become the lower bound */\n+ outputs[0] = make_range(typcache, &upper2, &upper1, false, NULL);\n+ *outputn = 1;\n+ }\n+ else\n+ {\n+ elog(ERROR, \"unexpected case in range_without_portion\");\n+ }\n+}\n\nI am confused.\nsay condition: \" (cmp_l1l2 <= 0 && cmp_u1l2 >= 0 && cmp_u1u2 <= 0)\"\nthe following code will only run PartA, never run PartB?\n\n`\nelse if (cmp_l1l2 >= 0 && cmp_u1u2 <= 0)\n PartA\nelse if (cmp_l1l2 <= 0 && cmp_u1l2 >= 0 && cmp_u1u2 <= 0)\n PartB\n`\n\nminimum example:\n#include<stdio.h>\n#include<string.h>\n#include<stdlib.h>\n#include<assert.h>\nint\nmain(void)\n{\n int cmp_l1l2;\n int cmp_u1u2;\n int cmp_u1l2;\n int cmp_l1u2;\n cmp_l1u2 = -1;\n cmp_l1l2 = 0;\n cmp_u1u2 = 0;\n cmp_u1l2 = 0;\n assert(cmp_u1l2 == 0);\nif (cmp_l1u2 > 0 || cmp_u1l2 < 0)\n printf(\"calling partA\\n\");\n else if (cmp_l1l2 >= 0 && cmp_u1u2 <= 0)\n printf(\"calling partB\\n\");\n else if (cmp_l1l2 <= 0 && cmp_u1l2 >= 0 && cmp_u1u2 <= 0)\n printf(\"calling partC\\n\");\n}\n\nI am confused with the name \"range_without_portion\", I think\n\"range_not_overlap\" would be better.\n\nselect numrange(1.1, 2.2) @- numrange(2.0, 3.0);\nthe result is not the same as\nselect numrange(2.0, 3.0) @- numrange(1.1, 2.2);\n\nSo your categorize oprkind as 'b' for operator \"@-\" is wrong?\nselect oprname,oprkind,oprcanhash,oprcanmerge,oprleft,oprright,oprresult,oprcode\nfrom pg_operator\nwhere oprname = '@-';\n\naslo\nselect count(*), oprkind from pg_operator group by oprkind;\nthere are only 5% are prefix operators.\nmaybe we should design it as:\n1. if both inputs are empty range, the result array is empty.\n2. if both inputs are non-empty and never overlaps, put both of them\nto the result array.\n3. 
if one input is empty another one is not, then put the non-empty\none into the result array.\n\nafter applying the patch: now the catalog data seems not correct to me.\nSELECT a1.amopfamily\n ,a1.amoplefttype::regtype\n ,a1.amoprighttype\n ,a1.amopstrategy\n ,amoppurpose\n ,amopsortfamily\n ,amopopr\n ,op.oprname\n ,am.amname\nFROM pg_amop as a1 join pg_operator op on op.oid = a1.amopopr\njoin pg_am am on am.oid = a1.amopmethod\nwhere amoppurpose = 'p';\noutput:\n amopfamily | amoplefttype | amoprighttype | amopstrategy |\namoppurpose | amopsortfamily | amopopr | oprname | amname\n------------+---------------+---------------+--------------+-------------+----------------+---------+---------+--------\n 2593 | box | 603 | 31 | p\n | 0 | 803 | # | gist\n 3919 | anyrange | 3831 | 31 | p\n | 0 | 3900 | * | gist\n 6158 | anymultirange | 4537 | 31 | p\n | 0 | 4394 | * | gist\n 3919 | anyrange | 3831 | 32 | p\n | 0 | 8747 | @- | gist\n 6158 | anymultirange | 4537 | 32 | p\n | 0 | 8407 | @- | gist\n(5 rows)\n\nselect oprcode, oprname, oprleft::regtype\nfrom pg_operator opr\nwhere opr.oprname in ('#','*','@-')\nand oprleft = oprright\nand oprleft in (603,3831,4537);\noutput:\n\n oprcode | oprname | oprleft\n----------------------------+---------+---------------\n box_intersect | # | box\n range_intersect | * | anyrange\n multirange_intersect | * | anymultirange\n range_without_portion | @- | anyrange\n multirange_without_portion | @- | anymultirange\n(5 rows)\n\nshould amoppurpose = 'p' is true apply to ' @-' operator?\n\ncatalog-pg-amop.html:\n`\namopsortfamily oid (references pg_opfamily.oid):\nThe B-tree operator family this entry sorts according to, if an\nordering operator; zero if a search operator\n`\nyou should also update the above entry, the amopsortfamily is also\nzero for \"portion operator\" for the newly implemented \"portion\noperator\".\n\n\nv21-0006-Add-UPDATE-DELETE-FOR-PORTION-OF.patch\n create mode 100644 src/backend/utils/adt/period.c\n create mode 100644 
src/include/utils/period.h\nyou should put these two files to v21-0008-Add-PERIODs.patch.\nit's not related to that patch, it also makes people easy to review.\n\n\n", "msg_date": "Fri, 5 Jan 2024 13:06:28 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Getting caught up on reviews from November and December:\n\nOn 11/19/23 22:57, jian he wrote:\n >\n > I believe the following part should fail. Similar tests on\n > src/test/regress/sql/generated.sql. line begin 347.\n >\n > drop table if exists gtest23a,gtest23x cascade;\n > CREATE TABLE gtest23a (x int4range, y int4range,\n > CONSTRAINT gtest23a_pk PRIMARY KEY (x, y WITHOUT OVERLAPS));\n > CREATE TABLE gtest23x (a int4range, b int4range GENERATED ALWAYS AS\n > ('empty') STORED,\n > FOREIGN KEY (a, PERIOD b ) REFERENCES gtest23a(x, PERIOD y) ON UPDATE\n > CASCADE); -- should be error?\n\nOkay, I've added a restriction for temporal FKs too. But note this will\nchange once the PERIODs patch (the last one here) is finished. When the\ngenerated column is for a PERIOD, there will be logic to \"reroute\" the\nupdates to the constituent start/end columns instead.\n\n > begin;\n > drop table if exists fk, pk cascade;\n > CREATE TABLE pk (id int4range, valid_at int4range,\n > CONSTRAINT pk_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)\n > );\n > CREATE TABLE fk (\n > id int4range,valid_at tsrange, parent_id int4range,\n > CONSTRAINT fk FOREIGN KEY (parent_id, valid_at)\n > REFERENCES pk\n > );\n > rollback;\n > --\n > the above query will return an error: number of referencing and\n > referenced columns for foreign key disagree.\n > but if you look at it closely, primary key and foreign key columns both are two!\n > The error should be saying valid_at should be specified with \"PERIOD\".\n\nAh okay, thanks for the clarification! 
This is tricky because the user\nleft out the PERIOD on the fk side, and left out the entire pk side, so\nthose columns are just implicit. So there is no PERIOD anywhere.\nBut I agree that if the pk has WITHOUT OVERLAPS, we should expect a\ncorresponding PERIOD modifier on the fk side and explain that that's\nwhat's missing. The attached patches include that.\n\n > I found out other issues in v18.\n > I first do `git apply` then `git diff --check`, there is a white\n > space error in v18-0005.\n\nFixed, thanks!\n\n > You also need to change update.sgml and delete.sgml <title>Outputs</title> part.\n > Since at most, it can return 'UPDATE 3' or 'DELETE 3'.\n\nThis doesn't sound correct to me. An UPDATE or DELETE can target many\nrows. Also I don't think the inserted \"leftovers\" should be included in\nthese counts. They represent the rows updated/deleted.\n\n > --the following query should work?\n > drop table pk;\n > CREATE table pk(a numrange PRIMARY key,b text);\n > insert into pk values('[1,10]');\n > create or replace function demo1() returns void as $$\n > declare lb numeric default 1; up numeric default 3;\n > begin\n > update pk for portion of a from lb to up set b = 'lb_to_up';\n > return;\n > end\n > $$ language plpgsql;\n > select * from demo1();\n\nHmm this is a tough one. It is correct that the `FROM __ TO __` values cannot be column references. \nThey are computed up front, not per row. One reason is they are used to search the table. In fact \nthe standard basically allows nothing but literal strings here. See section 14.14, page 971 then \nlook up <point in time> on page 348 and <datetime value expression> on page 308. The most \nflexibility you get is you can add/subtract an interval to the datetime literal. We are already well \npast that by allowing expressions, (certain) functions, parameters, etc.\n\nOTOH in your plpgsql example they are not really columns. They just get represented as ColumnRefs \nand then passed to transformColumnRef. 
I'm surprised plpgsql does it that way. As a workaround you \ncould use `EXECUTE format(...)`, but I'd love to make that work as you show instead. I'll keep \nworking on this one but it's not done yet. Perhaps I can move the restriction into \nanalysis/planning. If anyone has any advice it'd be welcome.\n\nOn 12/6/23 05:22, jian he wrote:\n > this TODO:\n > * TODO: It sounds like FOR PORTION OF might need to do something here too?\n > based on comments on ExprContext. I refactor a bit, and solved this TODO.\n\nThe patch looks wrong to me. We need to range targeted by `FROM __\nTO __` to live for the whole statement, not just one tuple (see just\nabove). That's why it gets computed in the Init function node.\n\nI don't think that TODO is needed anymore at all. Older versions of the\npatch had more expressions besides this one, and I think it was those I\nwas concerned about. So I've removed the comment here.\n\n > tring to the following TODO:\n > // TODO: Need to save context->mtstate->mt_transition_capture? (See\n > comment on ExecInsert)\n >\n > but failed.\n > I also attached the trial, and also added the related test.\n >\n > You can also use the test to check portion update with insert trigger\n > with \"referencing old table as old_table new table as new_table\"\n > situation.\n\nThank you for the test case! This is very helpful. So the problem is\n`referencing new table as new_table` gets lost. I don't have a fix yet\nbut I'll work on it.\n\nOn 12/11/23 00:31, jian he wrote:\n > - false); /* quiet */\n > + false); /* quiet */\n >\n > Is the above part unnecessary?\n\nGood catch! Fixed.\n\n > I am confused. so now I only apply v19, 0001 to 0003.\n > period_to_range function never used. maybe we can move this part to\n > 0005-Add PERIODs.patch?\n > Also you add change in Makefile in 0003, meson.build change in 0005,\n > better put it on in 0005?\n\nYou're right, those changes should have been in the PERIODs patch. 
Moved.\n\n > +/*\n > + * We need to handle this shift/reduce conflict:\n > + * FOR PORTION OF valid_at FROM INTERVAL YEAR TO MONTH TO foo.\n > + * This is basically the classic \"dangling else\" problem, and we want a\n > + * similar resolution: treat the TO as part of the INTERVAL, not as part of\n > + * the FROM ... TO .... Users can add parentheses if that's a problem.\n > + * TO just needs to be higher precedence than YEAR_P etc.\n > + * TODO: I need to figure out a %prec solution before this gets committed!\n > + */\n > +%nonassoc YEAR_P MONTH_P DAY_P HOUR_P MINUTE_P\n > +%nonassoc TO\n >\n > this part will never happen?\n > since \"FROM INTERVAL YEAR TO MONTH TO\"\n > means \"valid_at\" will be interval range data type, which does not exist now.\n\nIt appears still needed to me. Without those lines I get 4 shift/reduce\nconflicts. Are you seeing something different? Or if you have a better\nsolution I'd love to add it. I definitely need to fix this before that\npatch gets applied.\n\n > for all the refactor related to ri_PerformCheck, do you need (Datum) 0\n > instead of plain 0?\n\nCasts added.\n\n > + <para>\n > + If the table has a range column or\n > + <link linkend=\"ddl-periods-application-periods\"><literal>PERIOD</literal></link>,\n > + you may supply a <literal>FOR PORTION OF</literal> clause, and\n > your delete will\n > + only affect rows that overlap the given interval. Furthermore, if\n > a row's span\n >\n > \nhttps://influentialpoints.com/Training/basic_statistics_ranges.htm#:~:text=A%20range%20is%20two%20numbers,or%20the%20difference%20between%20them\n > So \"range\" is more accurate than \"interval\"?\n\nI don't think we should be using R to define the terms \"range\" and\n\"interval\", which both already have meanings in Postgres, SQL, and the\nliterature for temporal databases. But I'm planning to revise the docs'\nterminology here anyway. 
Some temporal database texts use \"interval\"\nin this sense, and I thought it was a decent term to mean \"range or\nPERIOD\". But now we need something to mean \"range or multirange or\ncustom type or PERIOD\". Actually \"portion\" seems like maybe the best\nterm, since the SQL syntax `FOR PORTION OF` reinforces that term. If you\nhave suggestions I'm happy for ideas.\n\n > +/* ----------\n > + * ForPortionOfState()\n > + *\n > + * Copies a ForPortionOfState into the current memory context.\n > + */\n > +static ForPortionOfState *\n > +CopyForPortionOfState(ForPortionOfState *src)\n > +{\n > + ForPortionOfState *dst = NULL;\n > + if (src) {\n > + MemoryContext oldctx;\n > + RangeType *r;\n > + TypeCacheEntry *typcache;\n > +\n > + /*\n > + * Need to lift the FOR PORTION OF details into a higher memory context\n > + * because cascading foreign key update/deletes can cause triggers to fire\n > + * triggers, and the AfterTriggerEvents will outlive the FPO\n > + * details of the original query.\n > + */\n > + oldctx = MemoryContextSwitchTo(TopTransactionContext);\n >\n > should it be \"Copy a ForPortionOfState into the TopTransactionContext\"?\n\nYou're right, the other function comments here use imperative mood. Changed.\n\nNew patches attached, rebased to 43b46aae12. I'll work on your feedback from Jan 4 next. Thanks!\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Fri, 5 Jan 2024 16:19:53 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Jan 5, 2024 at 1:06 PM jian he <jian.universality@gmail.com> wrote:\n>\n> On Tue, Jan 2, 2024 at 9:59 AM Paul Jungwirth\n> <pj@illuminatedcomputing.com> wrote:\n> >\n> > On 12/31/23 00:51, Paul Jungwirth wrote:\n> > > That's it for now.\n> >\n> > Here is another update. 
I fixed FOR PORTION OF on partitioned tables, in particular when the attnums\n> > are different from the root partition.\n> >\n> > Rebased to cea89c93a1.\n> >\n>\n> Hi.\n>\n> +/*\n> + * range_without_portion_internal - Sets outputs and outputn to the ranges\n> + * remaining and their count (respectively) after subtracting r2 from r1.\n> + * The array should never contain empty ranges.\n> + * The outputs will be ordered. We expect that outputs is an array of\n> + * RangeType pointers, already allocated with two slots.\n> + */\n> +void\n> +range_without_portion_internal(TypeCacheEntry *typcache, RangeType *r1,\n> + RangeType *r2, RangeType **outputs, int *outputn)\n> I am confused with the name \"range_without_portion\", I think\n> \"range_not_overlap\" would be better.\n>\n\nrange_intersect returns the intersection of two ranges.\nI think here we are doing the opposite.\nnames the main SQL function \"range_not_intersect\" and the internal\nfunction as \"range_not_intersect_internal\" should be fine.\nso people don't need to understand the meaning of \"portion\".\n\n\n", "msg_date": "Mon, 8 Jan 2024 22:54:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 1/8/24 06:54, jian he wrote:\n > On Fri, Jan 5, 2024 at 1:06 PM jian he <jian.universality@gmail.com> wrote:\n >\n > range_intersect returns the intersection of two ranges.\n > I think here we are doing the opposite.\n > names the main SQL function \"range_not_intersect\" and the internal\n > function as \"range_not_intersect_internal\" should be fine.\n > so people don't need to understand the meaning of \"portion\".\n\nThank you for helping me figure out a name here! I realize that can be a bike-sheddy kind of \ndiscussion, so let me share some of my principles.\n\nRange and multirange are highly mathematically \"pure\", and that's something I value in them. 
It \nmakes them more general-purpose, less encumbered by edge cases, easier to combine, and easier to \nreason about. Preserving that close connection to math is a big goal.\n\nWhat I've called `without_portion` is (like) a closed form of minus (hence `@-` for the operator). \nMinus isn't closed under everything (e.g. ranges), so `without_portion` adds arrays---much as to \nclose subtraction we add negative numbers and to close division we add rationals). We get the same \neffect from multiranges, but that only buys us range support. It would be awesome to support \narbitrary types: ranges, multiranges, mdranges, boxes, polygons, inets, etc., so I think an array is \nthe way to go here. And then each array element is a \"leftover\". What do we call a closed form of \nminus that returns arrays?\n\nUsing \"not\" suggests a function that returns true/false, but `@-` returns an array of things. So \ninstead of \"not\" let's consider \"complement\". I think that's what you're expressing re intersection.\n\nBut `@-` is not the same as the complement of intersection. For one thing, `@-` is not commutative. \n`old_range @- target_portion` is not the same as `target_portion @- old_range`. But \n`complement(old_range * target_portion)` *is* the same as `complement(target_portion * old_range)`. \nOr from another angle: it's true that `old_range @- target_portion = old_range @- (old_range * \ntarget_portion)`, but the intersection isn't \"doing\" anything here. It's true that intersection and \nminus both \"reduce\" what you put in, but minus is more accurate.\n\nSo I think we want a name that captures that idea of \"minus\". Both \"not\" and \"intersection\" are \nmisleading IMO.\n\nOf course \"minus\" is already taken (and you wouldn't expect it to return arrays anyway), which is \nwhy I'm thinking about names like \"without\" or \"except\". Or maybe \"multi-minus\". 
I still think \n\"without portion\" is the closest to capturing everything above (and avoids ambiguity with other SQL \noperations). And the \"portion\" ties the operator to `FOR PORTION OF`, which is its purpose. But I \nwouldn't be surprised if there were something better.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Mon, 8 Jan 2024 10:54:13 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sat, Jan 6, 2024 at 8:20 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> Getting caught up on reviews from November and December:\n>\n>\n> New patches attached, rebased to 43b46aae12. I'll work on your feedback from Jan 4 next. Thanks!\n>\n\n+/*\n+ * ForPortionOfClause\n+ * representation of FOR PORTION OF <period-name> FROM <ts> TO <te>\n+ * or FOR PORTION OF <period-name> (<target>)\n+ */\n+typedef struct ForPortionOfClause\n+{\n+ NodeTag type;\n+ char *range_name;\n+ int range_name_location;\n+ Node *target;\n+ Node *target_start;\n+ Node *target_end;\n+} ForPortionOfClause;\n\n\"range_name_location\" can be just \"location\"?\ngenerally most of the struct put the \"location\" to the last field in the struct.\n(that's the pattern I found all over other code)\n\n+ if (isUpdate)\n+ {\n+ /*\n+ * Now make sure we update the start/end time of the record.\n+ * For a range col (r) this is `r = r * targetRange`.\n+ */\n+ Expr *rangeSetExpr;\n+ TargetEntry *tle;\n+\n+ strat = RTIntersectStrategyNumber;\n+ GetOperatorFromCanonicalStrategy(opclass, InvalidOid, \"intersects\",\n\"FOR PORTION OF\", &opid, &strat);\n+ rangeSetExpr = (Expr *) makeSimpleA_Expr(AEXPR_OP, get_opname(opid),\n+ (Node *) copyObject(rangeVar), targetExpr,\n+ forPortionOf->range_name_location);\n+ rangeSetExpr = (Expr *) transformExpr(pstate, (Node *) rangeSetExpr,\nEXPR_KIND_UPDATE_PORTION);\n+\n+ /* Make a TLE to set the range column */\n+ 
result->rangeSet = NIL;\n+ tle = makeTargetEntry(rangeSetExpr, range_attno, range_name, false);\n+ result->rangeSet = lappend(result->rangeSet, tle);\n+\n+ /* Mark the range column as requiring update permissions */\n+ target_perminfo->updatedCols = bms_add_member(target_perminfo->updatedCols,\n+ range_attno - FirstLowInvalidHeapAttributeNumber);\n+ }\n+ else\n+ result->rangeSet = NIL;\nI think the name \"rangeSet\" is misleading, since \"set\" is generally\nrelated to a set of records.\nbut here it's more about the \"range intersect\".\n\nin ExecDelete\nwe have following code pattern:\nExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);\nif (processReturning && resultRelInfo->ri_projectReturning)\n{\n....\nif (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,\n SnapshotAny, slot))\nelog(ERROR, \"failed to fetch deleted tuple for DELETE RETURNING\");\n}\n}\n\nbut the ExecForPortionOfLeftovers is inside ExecDeleteEpilogue.\nmeaning even without ExecForPortionOfLeftovers, we can still call\ntable_tuple_fetch_row_version\nalso if it was *not* concurrently updated, then our current process\nholds the lock until the ending of the transaction, i think.\nSo the following TODO is unnecessary?\n\n+ /*\n+ * Get the range of the old pre-UPDATE/DELETE tuple,\n+ * so we can intersect it with the FOR PORTION OF target\n+ * and see if there are any \"leftovers\" to insert.\n+ *\n+ * We have already locked the tuple in ExecUpdate/ExecDelete\n+ * (TODO: if it was *not* concurrently updated, does\ntable_tuple_update lock the tuple itself?\n+ * I don't found the code for that yet, and maybe it depends on the AM?)\n+ * and it has passed EvalPlanQual.\n+ * Make sure we're looking at the most recent version.\n+ * Otherwise concurrent updates of the same tuple in READ COMMITTED\n+ * could insert conflicting \"leftovers\".\n+ */\n+ if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,\ntupleid, SnapshotAny, oldtupleSlot))\n+ elog(ERROR, 
\"failed to fetch tuple for FOR PORTION OF\");\n+\n\n+/* ----------------------------------------------------------------\n+ * ExecForPortionOfLeftovers\n+ *\n+ * Insert tuples for the untouched timestamp of a row in a FOR\n+ * PORTION OF UPDATE/DELETE\n+ * ----------------------------------------------------------------\n+ */\n+static void\n+ExecForPortionOfLeftovers(ModifyTableContext *context,\n+ EState *estate,\n+ ResultRelInfo *resultRelInfo,\n+ ItemPointer tupleid)\n\nmaybe change the comment to\n\"Insert tuples for the not intersection of a row in a FOR PORTION OF\nUPDATE/DELETE.\"\n\n+ deconstruct_array(DatumGetArrayTypeP(allLeftovers),\ntypcache->type_id, typcache->typlen,\n+ typcache->typbyval, typcache->typalign, &leftovers, NULL, &nleftovers);\n+\n+ if (nleftovers > 0)\n+ {\nI think add something like assert nleftovers >=0 && nleftovers <= 2\n(assume only range not multirange) would improve readability.\n\n\n+ <para>\n+ If the table has a range column or\n+ <link linkend=\"ddl-periods-application-periods\"><literal>PERIOD</literal></link>,\n+ you may supply a <literal>FOR PORTION OF</literal> clause, and\nyour delete will\n+ only affect rows that overlap the given interval. Furthermore, if\na row's span\n+ extends outside the <literal>FOR PORTION OF</literal> bounds, then\nyour delete\n+ will only change the span within those bounds. 
In effect you are\ndeleting any\n+ moment targeted by <literal>FOR PORTION OF</literal> and no moments outside.\n+ </para>\n+\n+ <para>\n+ Specifically, after <productname>PostgreSQL</productname> deletes\nthe existing row,\n+ it will <literal>INSERT</literal>\n+ new rows whose range or start/end column(s) receive the remaining\nspan outside\n+ the targeted bounds, containing the original values in other columns.\n+ There will be zero to two inserted records,\n+ depending on whether the original span extended before the targeted\n+ <literal>FROM</literal>, after the targeted <literal>TO</literal>,\nboth, or neither.\n+ </para>\n+\n+ <para>\n+ These secondary inserts fire <literal>INSERT</literal> triggers. First\n+ <literal>BEFORE DELETE</literal> triggers first, then\n+ <literal>BEFORE INSERT</literal>, then <literal>AFTER INSERT</literal>,\n+ then <literal>AFTER DELETE</literal>.\n+ </para>\n+\n+ <para>\n+ These secondary inserts do not require <literal>INSERT</literal>\nprivilege on the table.\n+ This is because conceptually no new information has been added.\nThe inserted rows only preserve\n+ existing data about the untargeted time period. 
Note this may\nresult in users firing <literal>INSERT</literal>\n+ triggers who don't have insert privileges, so be careful about\n<literal>SECURITY DEFINER</literal> trigger functions!\n+ </para>\n\nI think you need to wrap them into a big paragraph, otherwise they\nlose the context?\nplease see the attached build sql-update.html.\n\nalso I think\n+ <link linkend=\"ddl-periods-application-periods\"><literal>PERIOD</literal></link>,\nshould shove into Add-PERIODs.patch.\n\notherwise you cannot build Add-UPDATE-DELETE-FOR-PORTION-OF.patch\nwithout all the patches.\nI think the \"FOR-PORTION-OF\" feature is kind of independ?\nBecause, IMHO, \"for portion\" is a range datum interacting with another\nsingle range datum, but the primary key with \"WITHOUT OVERLAPS\", is\nrange datum interacting with a set of range datums.\nnow I cannot just git apply v22-0006-Add-UPDATE-DELETE-FOR-PORTION-OF.patch.\nThat maybe would make it more difficult to get commited?", "msg_date": "Tue, 9 Jan 2024 08:00:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, Jan 9, 2024 at 2:54 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 1/8/24 06:54, jian he wrote:\n> > On Fri, Jan 5, 2024 at 1:06 PM jian he <jian.universality@gmail.com> wrote:\n> >\n> > range_intersect returns the intersection of two ranges.\n> > I think here we are doing the opposite.\n> > names the main SQL function \"range_not_intersect\" and the internal\n> > function as \"range_not_intersect_internal\" should be fine.\n> > so people don't need to understand the meaning of \"portion\".\n>\n> Thank you for helping me figure out a name here! I realize that can be a bike-sheddy kind of\n> discussion, so let me share some of my principles.\n>\n> Range and multirange are highly mathematically \"pure\", and that's something I value in them. 
It\n> makes them more general-purpose, less encumbered by edge cases, easier to combine, and easier to\n> reason about. Preserving that close connection to math is a big goal.\n>\n> What I've called `without_portion` is (like) a closed form of minus (hence `@-` for the operator).\n> Minus isn't closed under everything (e.g. ranges), so `without_portion` adds arrays---much as to\n> close subtraction we add negative numbers and to close division we add rationals). We get the same\n> effect from multiranges, but that only buys us range support. It would be awesome to support\n> arbitrary types: ranges, multiranges, mdranges, boxes, polygons, inets, etc., so I think an array is\n> the way to go here. And then each array element is a \"leftover\". What do we call a closed form of\n> minus that returns arrays?\n>\n> Of course \"minus\" is already taken (and you wouldn't expect it to return arrays anyway), which is\n> why I'm thinking about names like \"without\" or \"except\". Or maybe \"multi-minus\". I still think\n> \"without portion\" is the closest to capturing everything above (and avoids ambiguity with other SQL\n> operations). And the \"portion\" ties the operator to `FOR PORTION OF`, which is its purpose. But I\n> wouldn't be surprised if there were something better.\n>\n\nThanks for the deep explanation. I think the name\nrange_without_portion is better than my range_not_intersect.\nI learned a lot.\nI also googled \" bike-sheddy\". 
haha.\n\nsrc5=# select range_without_portion(numrange(1.0,3.0,'[]'),\nnumrange(1.5,2.0,'(]'));\n range_without_portion\n---------------------------\n {\"[1.0,1.5]\",\"(2.0,3.0]\"}\n(1 row)\n\nsrc5=# \\gdesc\n Column | Type\n-----------------------+-----------\n range_without_portion | numeric[]\n(1 row)\n\nsrc5=# \\df range_without_portion\n List of functions\n Schema | Name | Result data type | Argument data\ntypes | Type\n------------+-----------------------+------------------+---------------------+------\n pg_catalog | range_without_portion | anyarray | anyrange,\nanyrange | func\n(1 row)\n\nso apparently, you cannot from (anyrange, anyrange) get anyarray the\nelement type is anyrange.\nI cannot find the documented explanation in\nhttps://www.postgresql.org/docs/current/extend-type-system.html#EXTEND-TYPES-POLYMORPHIC\n\nanyrange is POLYMORPHIC, anyarray is POLYMORPHIC,\nbut I suppose, getting an anyarray the element type is anyrange would be hard.\n\n\n", "msg_date": "Tue, 9 Jan 2024 13:33:33 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sat, 6 Jan 2024 at 05:50, Paul Jungwirth <pj@illuminatedcomputing.com> wrote:\n>\n> Getting caught up on reviews from November and December:\n>\n> On 11/19/23 22:57, jian he wrote:\n> >\n> > I believe the following part should fail. Similar tests on\n> > src/test/regress/sql/generated.sql. line begin 347.\n> >\n> > drop table if exists gtest23a,gtest23x cascade;\n> > CREATE TABLE gtest23a (x int4range, y int4range,\n> > CONSTRAINT gtest23a_pk PRIMARY KEY (x, y WITHOUT OVERLAPS));\n> > CREATE TABLE gtest23x (a int4range, b int4range GENERATED ALWAYS AS\n> > ('empty') STORED,\n> > FOREIGN KEY (a, PERIOD b ) REFERENCES gtest23a(x, PERIOD y) ON UPDATE\n> > CASCADE); -- should be error?\n>\n> Okay, I've added a restriction for temporal FKs too. 
But note this will\n> change once the PERIODs patch (the last one here) is finished. When the\n> generated column is for a PERIOD, there will be logic to \"reroute\" the\n> updates to the constituent start/end columns instead.\n>\n> > begin;\n> > drop table if exists fk, pk cascade;\n> > CREATE TABLE pk (id int4range, valid_at int4range,\n> > CONSTRAINT pk_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)\n> > );\n> > CREATE TABLE fk (\n> > id int4range,valid_at tsrange, parent_id int4range,\n> > CONSTRAINT fk FOREIGN KEY (parent_id, valid_at)\n> > REFERENCES pk\n> > );\n> > rollback;\n> > --\n> > the above query will return an error: number of referencing and\n> > referenced columns for foreign key disagree.\n> > but if you look at it closely, primary key and foreign key columns both are two!\n> > The error should be saying valid_at should be specified with \"PERIOD\".\n>\n> Ah okay, thanks for the clarification! This is tricky because the user\n> left out the PERIOD on the fk side, and left out the entire pk side, so\n> those columns are just implicit. So there is no PERIOD anywhere.\n> But I agree that if the pk has WITHOUT OVERLAPS, we should expect a\n> corresponding PERIOD modifier on the fk side and explain that that's\n> what's missing. The attached patches include that.\n>\n> > I found out other issues in v18.\n> > I first do `git apply` then `git diff --check`, there is a white\n> > space error in v18-0005.\n>\n> Fixed, thanks!\n>\n> > You also need to change update.sgml and delete.sgml <title>Outputs</title> part.\n> > Since at most, it can return 'UPDATE 3' or 'DELETE 3'.\n>\n> This doesn't sound correct to me. An UPDATE or DELETE can target many\n> rows. Also I don't think the inserted \"leftovers\" should be included in\n> these counts. 
They represent the rows updated/deleted.\n>\n> > --the following query should work?\n> > drop table pk;\n> > CREATE table pk(a numrange PRIMARY key,b text);\n> > insert into pk values('[1,10]');\n> > create or replace function demo1() returns void as $$\n> > declare lb numeric default 1; up numeric default 3;\n> > begin\n> > update pk for portion of a from lb to up set b = 'lb_to_up';\n> > return;\n> > end\n> > $$ language plpgsql;\n> > select * from demo1();\n>\n> Hmm this is a tough one. It is correct that the `FROM __ TO __` values cannot be column references.\n> They are computed up front, not per row. One reason is they are used to search the table. In fact\n> the standard basically allows nothing but literal strings here. See section 14.14, page 971 then\n> look up <point in time> on page 348 and <datetime value expression> on page 308. The most\n> flexibility you get is you can add/subtract an interval to the datetime literal. We are already well\n> past that by allowing expressions, (certain) functions, parameters, etc.\n>\n> OTOH in your plpgsql example they are not really columns. They just get represented as ColumnRefs\n> and then passed to transformColumnRef. I'm surprised plpgsql does it that way. As a workaround you\n> could use `EXECUTE format(...)`, but I'd love to make that work as you show instead. I'll keep\n> working on this one but it's not done yet. Perhaps I can move the restriction into\n> analysis/planning. If anyone has any advice it'd be welcome.\n>\n> On 12/6/23 05:22, jian he wrote:\n> > this TODO:\n> > * TODO: It sounds like FOR PORTION OF might need to do something here too?\n> > based on comments on ExprContext. I refactor a bit, and solved this TODO.\n>\n> The patch looks wrong to me. We need to range targeted by `FROM __\n> TO __` to live for the whole statement, not just one tuple (see just\n> above). That's why it gets computed in the Init function node.\n>\n> I don't think that TODO is needed anymore at all. 
Older versions of the\n> patch had more expressions besides this one, and I think it was those I\n> was concerned about. So I've removed the comment here.\n>\n> > tring to the following TODO:\n> > // TODO: Need to save context->mtstate->mt_transition_capture? (See\n> > comment on ExecInsert)\n> >\n> > but failed.\n> > I also attached the trial, and also added the related test.\n> >\n> > You can also use the test to check portion update with insert trigger\n> > with \"referencing old table as old_table new table as new_table\"\n> > situation.\n>\n> Thank you for the test case! This is very helpful. So the problem is\n> `referencing new table as new_table` gets lost. I don't have a fix yet\n> but I'll work on it.\n>\n> On 12/11/23 00:31, jian he wrote:\n> > - false); /* quiet */\n> > + false); /* quiet */\n> >\n> > Is the above part unnecessary?\n>\n> Good catch! Fixed.\n>\n> > I am confused. so now I only apply v19, 0001 to 0003.\n> > period_to_range function never used. maybe we can move this part to\n> > 0005-Add PERIODs.patch?\n> > Also you add change in Makefile in 0003, meson.build change in 0005,\n> > better put it on in 0005?\n>\n> You're right, those changes should have been in the PERIODs patch. Moved.\n>\n> > +/*\n> > + * We need to handle this shift/reduce conflict:\n> > + * FOR PORTION OF valid_at FROM INTERVAL YEAR TO MONTH TO foo.\n> > + * This is basically the classic \"dangling else\" problem, and we want a\n> > + * similar resolution: treat the TO as part of the INTERVAL, not as part of\n> > + * the FROM ... TO .... 
Users can add parentheses if that's a problem.\n> > + * TO just needs to be higher precedence than YEAR_P etc.\n> > + * TODO: I need to figure out a %prec solution before this gets committed!\n> > + */\n> > +%nonassoc YEAR_P MONTH_P DAY_P HOUR_P MINUTE_P\n> > +%nonassoc TO\n> >\n> > this part will never happen?\n> > since \"FROM INTERVAL YEAR TO MONTH TO\"\n> > means \"valid_at\" will be interval range data type, which does not exist now.\n>\n> It appears still needed to me. Without those lines I get 4 shift/reduce\n> conflicts. Are you seeing something different? Or if you have a better\n> solution I'd love to add it. I definitely need to fix this before that\n> patch gets applied.\n>\n> > for all the refactor related to ri_PerformCheck, do you need (Datum) 0\n> > instead of plain 0?\n>\n> Casts added.\n>\n> > + <para>\n> > + If the table has a range column or\n> > + <link linkend=\"ddl-periods-application-periods\"><literal>PERIOD</literal></link>,\n> > + you may supply a <literal>FOR PORTION OF</literal> clause, and\n> > your delete will\n> > + only affect rows that overlap the given interval. Furthermore, if\n> > a row's span\n> >\n> >\n> https://influentialpoints.com/Training/basic_statistics_ranges.htm#:~:text=A%20range%20is%20two%20numbers,or%20the%20difference%20between%20them\n> > So \"range\" is more accurate than \"interval\"?\n>\n> I don't think we should be using R to define the terms \"range\" and\n> \"interval\", which both already have meanings in Postgres, SQL, and the\n> literature for temporal databases. But I'm planning to revise the docs'\n> terminology here anyway. Some temporal database texts use \"interval\"\n> in this sense, and I thought it was a decent term to mean \"range or\n> PERIOD\". But now we need something to mean \"range or multirange or\n> custom type or PERIOD\". Actually \"portion\" seems like maybe the best\n> term, since the SQL syntax `FOR PORTION OF` reinforces that term. 
If you\n> have suggestions I'm happy for ideas.\n>\n> > +/* ----------\n> > + * ForPortionOfState()\n> > + *\n> > + * Copies a ForPortionOfState into the current memory context.\n> > + */\n> > +static ForPortionOfState *\n> > +CopyForPortionOfState(ForPortionOfState *src)\n> > +{\n> > + ForPortionOfState *dst = NULL;\n> > + if (src) {\n> > + MemoryContext oldctx;\n> > + RangeType *r;\n> > + TypeCacheEntry *typcache;\n> > +\n> > + /*\n> > + * Need to lift the FOR PORTION OF details into a higher memory context\n> > + * because cascading foreign key update/deletes can cause triggers to fire\n> > + * triggers, and the AfterTriggerEvents will outlive the FPO\n> > + * details of the original query.\n> > + */\n> > + oldctx = MemoryContextSwitchTo(TopTransactionContext);\n> >\n> > should it be \"Copy a ForPortionOfState into the TopTransactionContext\"?\n>\n> You're right, the other function comments here use imperative mood. Changed.\n>\n> New patches attached, rebased to 43b46aae12. I'll work on your feedback from Jan 4 next. 
Thanks!\n\nOne of the test has failed in CFBot at [1] with:\n\ndiff -U3 /tmp/cirrus-ci-build/src/test/regress/expected/generated.out\n/tmp/cirrus-ci-build/src/test/recovery/tmp_check/results/generated.out\n--- /tmp/cirrus-ci-build/src/test/regress/expected/generated.out\n2024-01-06 00:34:48.078691251 +0000\n+++ /tmp/cirrus-ci-build/src/test/recovery/tmp_check/results/generated.out\n2024-01-06 00:42:08.782292390 +0000\n@@ -19,7 +19,9 @@\n table_name | column_name | dependent_column\n ------------+-------------+------------------\n gtest1 | a | b\n-(1 row)\n+ pt | de | p\n+ pt | ds | p\n+(3 rows)\n\nMore details of the failure is available at [2].\n\n[1] - https://cirrus-ci.com/task/5739983420522496\n[2] - https://api.cirrus-ci.com/v1/artifact/task/5739983420522496/log/src/test/recovery/tmp_check/log/regress_log_027_stream_regress\n\nRegards,\nVignesh\n\n\n", "msg_date": "Tue, 9 Jan 2024 12:16:34 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 31.12.23 09:51, Paul Jungwirth wrote:\n> On Wed, Dec 6, 2023 at 12:59 AM Peter Eisentraut <peter@eisentraut.org> \n> wrote:\n> >\n> > On 02.12.23 19:41, Paul Jungwirth wrote:\n> > > So what do you think of this idea instead?:\n> > >\n> > > We could add a new (optional) support function to GiST that translates\n> > > \"well-known\" strategy numbers into the opclass's own strategy numbers.\n> >\n> > I had some conversations about this behind the scenes.  I think this\n> > idea makes sense.\n> \n> Here is a patch series with the GiST stratnum support function added. I \n> put this into a separate patch (before all the temporal ones), so it's \n> easier to review. Then in the PK patch (now #2) we call that function to \n> figure out the = and && operators. I think this is a big improvement.\n\nI like this solution.\n\nHere is some more detailed review of the first two patches. 
(I reviewed \nv20; I see you have also posted v21, but they don't appear very \ndifferent for this purpose.)\n\nv20-0001-Add-stratnum-GiST-support-function.patch\n\n* contrib/btree_gist/Makefile\n\nNeeds corresponding meson.build updates.\n\n* contrib/btree_gist/btree_gist--1.7--1.8.sql\n\nShould gist_stratnum_btree() live in contrib/btree_gist/ or in core?\nAre there other extensions that use the btree strategy numbers for\ngist?\n\n+ALTER OPERATOR FAMILY gist_vbit_ops USING gist ADD\n+ FUNCTION 12 (varbit, varbit) gist_stratnum_btree (int2) ;\n\nIs there a reason for the extra space after FUNCTION here (repeated\nthroughout the file)?\n\n+-- added in 1.4:\n\nWhat is the purpose of these \"added in\" comments?\n\n\nv20-0002-Add-temporal-PRIMARY-KEY-and-UNIQUE-constraints.patch\n\n* contrib/btree_gist/Makefile\n\nAlso update meson.build.\n\n* contrib/btree_gist/sql/without_overlaps.sql\n\nMaybe also insert a few values, to verify that the constraint actually\ndoes something?\n\n* doc/src/sgml/ref/create_table.sgml\n\nIs \"must have a range type\" still true? With the changes to the\nstrategy number mapping, any type with a supported operator class\nshould work?\n\n* src/backend/utils/adt/ruleutils.c\n\nIs it actually useful to add an argument to\ndecompile_column_index_array()? Wouldn't it be easier to just print\nthe \" WITHOUT OVERLAPS\" in the caller after returning from it?\n\n* src/include/access/gist_private.h\n\nThe added function gistTranslateStratnum() isn't really \"private\" to\ngist. So access/gist.h would be a better place for it.\n\nAlso, most other functions there appear to be named \"GistSomething\",\nso a more consistent name might be GistTranslateStratnum.\n\n* src/include/access/stratnum.h\n\nThe added StrategyIsValid() doesn't seem that useful? Plenty of\nexisting code just compares against InvalidStrategy, and there is only\none caller for the new function. 
I suggest to do without it.\n\n* src/include/commands/defrem.h\n\nWe are using two terms here, well-known strategy number and canonical\nstrategy number, to mean the same thing (I think?). Let's try to\nstick with one. Or explain the relationship?\n\n\nIf these points are addressed, and maybe with another round of checking \nthat all corner cases are covered, I think these patches (0001 and 0002) \nare close to ready.\n\n\n\n", "msg_date": "Thu, 11 Jan 2024 15:44:50 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, Jan 11, 2024 at 10:44 PM Peter Eisentraut <peter@eisentraut.org> wrote:\n>\n> On 31.12.23 09:51, Paul Jungwirth wrote:\n> > On Wed, Dec 6, 2023 at 12:59 AM Peter Eisentraut <peter@eisentraut.org>\n> > wrote:\n> > >\n> > > On 02.12.23 19:41, Paul Jungwirth wrote:\n> > > > So what do you think of this idea instead?:\n> > > >\n> > > > We could add a new (optional) support function to GiST that translates\n> > > > \"well-known\" strategy numbers into the opclass's own strategy numbers.\n> > >\n> > > I had some conversations about this behind the scenes. I think this\n> > > idea makes sense.\n> >\n> > Here is a patch series with the GiST stratnum support function added. I\n> > put this into a separate patch (before all the temporal ones), so it's\n> > easier to review. Then in the PK patch (now #2) we call that function to\n> > figure out the = and && operators. I think this is a big improvement.\n>\n> I like this solution.\n>\n> Here is some more detailed review of the first two patches. 
(I reviewed\n> v20; I see you have also posted v21, but they don't appear very\n> different for this purpose.)\n>\n> v20-0001-Add-stratnum-GiST-support-function.patch\n>\n> * contrib/btree_gist/Makefile\n>\n> Needs corresponding meson.build updates.\n\nfixed\n\n>\n> * contrib/btree_gist/btree_gist--1.7--1.8.sql\n>\n> Should gist_stratnum_btree() live in contrib/btree_gist/ or in core?\n> Are there other extensions that use the btree strategy numbers for\n> gist?\n>\n> +ALTER OPERATOR FAMILY gist_vbit_ops USING gist ADD\n> + FUNCTION 12 (varbit, varbit) gist_stratnum_btree (int2) ;\n>\n> Is there a reason for the extra space after FUNCTION here (repeated\n> throughout the file)?\n>\n\nfixed.\n\n> +-- added in 1.4:\n>\n> What is the purpose of these \"added in\" comments?\n>\n>\n> v20-0002-Add-temporal-PRIMARY-KEY-and-UNIQUE-constraints.patch\n>\n> * contrib/btree_gist/Makefile\n>\n> Also update meson.build.\n\nfixed.\n\n> * contrib/btree_gist/sql/without_overlaps.sql\n>\n> Maybe also insert a few values, to verify that the constraint actually\n> does something?\n>\n\nI added an ok and failed INSERT.\n\n> * doc/src/sgml/ref/create_table.sgml\n>\n> Is \"must have a range type\" still true? With the changes to the\n> strategy number mapping, any type with a supported operator class\n> should work?\n>\n> * src/backend/utils/adt/ruleutils.c\n>\n> Is it actually useful to add an argument to\n> decompile_column_index_array()? Wouldn't it be easier to just print\n> the \" WITHOUT OVERLAPS\" in the caller after returning from it?\n\nfixed. i just print it right after decompile_column_index_array.\n\n> * src/include/access/gist_private.h\n>\n> The added function gistTranslateStratnum() isn't really \"private\" to\n> gist. 
So access/gist.h would be a better place for it.\n>\n> Also, most other functions there appear to be named \"GistSomething\",\n> so a more consistent name might be GistTranslateStratnum.\n>\n> * src/include/access/stratnum.h\n>\n> The added StrategyIsValid() doesn't seem that useful? Plenty of\n> existing code just compares against InvalidStrategy, and there is only\n> one caller for the new function. I suggest to do without it.\n>\n\nIf more StrategyNumber are used in the future, will StrategyIsValid()\nmake sense?\n\n> * src/include/commands/defrem.h\n>\n> We are using two terms here, well-known strategy number and canonical\n> strategy number, to mean the same thing (I think?). Let's try to\n> stick with one. Or explain the relationship?\n>\n\nIn my words:\nfor range type, well-known strategy number and canonical strategy\nnumber are the same thing.\nFor types Gist does not natively support equality, like int4,\nGetOperatorFromCanonicalStrategy will pass RTEqualStrategyNumber from\nComputeIndexAttrs\nand return BTEqualStrategyNumber.\n\n> If these points are addressed, and maybe with another round of checking\n> that all corner cases are covered, I think these patches (0001 and 0002)\n> are close to ready.\n>\n\nthe following are my review:\n\n+ /* exclusionOpNames can be non-NIL if we are creating a partition */\n+ if (iswithoutoverlaps && exclusionOpNames == NIL)\n+ {\n+ indexInfo->ii_ExclusionOps = palloc_array(Oid, nkeycols);\n+ indexInfo->ii_ExclusionProcs = palloc_array(Oid, nkeycols);\n+ indexInfo->ii_ExclusionStrats = palloc_array(uint16, nkeycols);\n+ }\nI am not sure the above comment is related to the code\n\n+/*\n+ * Returns the btree number for equals, otherwise invalid.\n+ *\n+ * This is for GiST opclasses in btree_gist (and maybe elsewhere)\n+ * that use the BT*StrategyNumber constants.\n+ */\n+Datum\n+gist_stratnum_btree(PG_FUNCTION_ARGS)\n+{\n+ StrategyNumber strat = PG_GETARG_UINT16(0);\n+\n+ switch (strat)\n+ {\n+ case 
RTEqualStrategyNumber:\n+ PG_RETURN_UINT16(BTEqualStrategyNumber);\n+ case RTLessStrategyNumber:\n+ PG_RETURN_UINT16(BTLessStrategyNumber);\n+ case RTLessEqualStrategyNumber:\n+ PG_RETURN_UINT16(BTLessEqualStrategyNumber);\n+ case RTGreaterStrategyNumber:\n+ PG_RETURN_UINT16(BTGreaterStrategyNumber);\n+ case RTGreaterEqualStrategyNumber:\n+ PG_RETURN_UINT16(BTGreaterEqualStrategyNumber);\n+ default:\n+ PG_RETURN_UINT16(InvalidStrategy);\n+ }\nthe above comment seems not right?\neven though currently strat will only be RTEqualStrategyNumber.\n\n+void\n+GetOperatorFromCanonicalStrategy(Oid opclass,\n+ Oid atttype,\n+ const char *opname,\n+ Oid *opid,\n+ StrategyNumber *strat)\n+{\n+ Oid opfamily;\n+ Oid opcintype;\n+ StrategyNumber opstrat = *strat;\n+\n+ *opid = InvalidOid;\n+\n+ if (get_opclass_opfamily_and_input_type(opclass,\n+ &opfamily,\n+ &opcintype))\n+ {\n+ /*\n+ * Ask the opclass to translate to its internal stratnum\n+ *\n+ * For now we only need GiST support, but this could support\n+ * other indexams if we wanted.\n+ */\n+ *strat = gistTranslateStratnum(opclass, opstrat);\n+ if (!StrategyIsValid(*strat))\n+ ereport(ERROR,\n+ (errcode(ERRCODE_UNDEFINED_OBJECT),\n+ errmsg(\"no %s operator found for WITHOUT OVERLAPS constraint\", opname),\n+ errdetail(\"Could not translate strategy number %u for opclass %d.\",\n+ opstrat, opclass),\n+ errhint(\"Define a stratnum support function for your GiST opclass.\")));\n+\n+ *opid = get_opfamily_member(opfamily, opcintype, opcintype, *strat);\n+ }\n+\n+ if (!OidIsValid(*opid))\n+ {\n+ HeapTuple opftuple;\n+ Form_pg_opfamily opfform;\n+\n+ /*\n+ * attribute->opclass might not explicitly name the opfamily,\n+ * so fetch the name of the selected opfamily for use in the\n+ * error message.\n+ */\n+ opftuple = SearchSysCache1(OPFAMILYOID,\n+ ObjectIdGetDatum(opfamily));\n+ if (!HeapTupleIsValid(opftuple))\n+ elog(ERROR, \"cache lookup failed for opfamily %u\",\n+ opfamily);\n+ opfform = (Form_pg_opfamily) 
GETSTRUCT(opftuple);\n+\n+ ereport(ERROR,\n+ (errcode(ERRCODE_UNDEFINED_OBJECT),\n+ errmsg(\"no %s operator found for WITHOUT OVERLAPS constraint\", opname),\n+ errdetail(\"There must be an %s operator within opfamily \\\"%s\\\" for\ntype \\\"%s\\\".\",\n+ opname,\n+ NameStr(opfform->opfname),\n+ format_type_be(atttype))));\n+ }\n+}\nI refactored this function.\nGetOperatorFromCanonicalStrategy called both for normal and WITHOUT OVERLAPS.\nso errmsg(\"no %s operator found for WITHOUT OVERLAPS constraint\",\nopname) would be misleading\nfor columns without \"WITHOUT OVERLAPS\".\nAlso since that error part was deemed unreachable, it would make the\nerror verbose, I guess.\n\n--- a/src/bin/psql/describe.c\n+++ b/src/bin/psql/describe.c\n@@ -2379,6 +2379,10 @@ describeOneTableDetails(const char *schemaname,\n else\n appendPQExpBufferStr(&buf, \", false AS indisreplident\");\n appendPQExpBufferStr(&buf, \", c2.reltablespace\");\n+ if (pset.sversion >= 170000)\n+ appendPQExpBufferStr(&buf, \", con.conwithoutoverlaps\");\n+ else\n+ appendPQExpBufferStr(&buf, \", false AS conwithoutoverlaps\");\n\nI don't know how to verify it.\nI think it should be:\n+ if (pset.sversion >= 170000)\n+ appendPQExpBufferStr(&buf, \", con.conwithoutoverlaps\");\n\nI refactored the 0002 commit message.\nThe original commit message seems outdated.\nI put all the related changes into one attachment.", "msg_date": "Sun, 14 Jan 2024 08:00:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hello,\n\nHere are new patches consolidating feedback from several emails.\nI haven't addressed everything but I think I'm overdue for a reply:\n\nOn 1/4/24 21:06, jian he wrote:\n >\n > I am confused.\n > say condition: \" (cmp_l1l2 <= 0 && cmp_u1l2 >= 0 && cmp_u1u2 <= 0)\"\n > the following code will only run PartA, never run PartB?\n >\n > `\n > else if (cmp_l1l2 >= 0 && cmp_u1u2 <= 0)\n > PartA\n > else if 
(cmp_l1l2 <= 0 && cmp_u1l2 >= 0 && cmp_u1u2 <= 0)\n > PartB\n > `\n >\n > minimum example:\n > #include<stdio.h>\n > #include<string.h>\n > #include<stdlib.h>\n > #include<assert.h>\n > int\n > main(void)\n > {\n > int cmp_l1l2;\n > int cmp_u1u2;\n > int cmp_u1l2;\n > int cmp_l1u2;\n > cmp_l1u2 = -1;\n > cmp_l1l2 = 0;\n > cmp_u1u2 = 0;\n > cmp_u1l2 = 0;\n > assert(cmp_u1l2 == 0);\n > if (cmp_l1u2 > 0 || cmp_u1l2 < 0)\n > printf(\"calling partA\\n\");\n > else if (cmp_l1l2 >= 0 && cmp_u1u2 <= 0)\n > printf(\"calling partB\\n\");\n > else if (cmp_l1l2 <= 0 && cmp_u1l2 >= 0 && cmp_u1u2 <= 0)\n > printf(\"calling partC\\n\");\n > }\n\nAll of the branches are used. I've attached a `without_portion.c` minimal example showing different \ncases. For ranges it helps to go through the Allen relationships \n(https://en.wikipedia.org/wiki/Allen%27s_interval_algebra) to make a comprehensive check. (But note \nthat our operators don't exactly match that terminology, and it's important to consider \nclosed-vs-open and unbounded cases.)\n\n > I am confused with the name \"range_without_portion\", I think\n > \"range_not_overlap\" would be better.\n\nI think I covered this in my other reply and we are now in agreement, but if that's mistaken let \nknow me.\n\n > select numrange(1.1, 2.2) @- numrange(2.0, 3.0);\n > the result is not the same as\n > select numrange(2.0, 3.0) @- numrange(1.1, 2.2);\n\nCorrect, @- is not commutative.\n\n > So your categorize oprkind as 'b' for operator \"@-\" is wrong?\n > select oprname,oprkind,oprcanhash,oprcanmerge,oprleft,oprright,oprresult,oprcode\n > from pg_operator\n > where oprname = '@-';\n\n'b' is the correct oprkind. It is a binary (infix) operator.\n\n > aslo\n > select count(*), oprkind from pg_operator group by oprkind;\n > there are only 5% are prefix operators.\n > maybe we should design it as:\n > 1. if both inputs are empty range, the result array is empty.\n > 2. 
if both inputs are non-empty and never overlaps, put both of them\n > to the result array.\n > 3. if one input is empty another one is not, then put the non-empty\n > one into the result array.\n\nAlso covered before, but if any of this still applies please let me know.\n\n > after applying the patch: now the catalog data seems not correct to me.\n > SELECT a1.amopfamily\n > ,a1.amoplefttype::regtype\n > ,a1.amoprighttype\n > ,a1.amopstrategy\n > ,amoppurpose\n > ,amopsortfamily\n > ,amopopr\n > ,op.oprname\n > ,am.amname\n > FROM pg_amop as a1 join pg_operator op on op.oid = a1.amopopr\n > join pg_am am on am.oid = a1.amopmethod\n > where amoppurpose = 'p';\n > output:\n > amopfamily | amoplefttype | amoprighttype | amopstrategy |\n > amoppurpose | amopsortfamily | amopopr | oprname | amname\n > \n------------+---------------+---------------+--------------+-------------+----------------+---------+---------+--------\n > 2593 | box | 603 | 31 | p\n > | 0 | 803 | # | gist\n > 3919 | anyrange | 3831 | 31 | p\n > | 0 | 3900 | * | gist\n > 6158 | anymultirange | 4537 | 31 | p\n > | 0 | 4394 | * | gist\n > 3919 | anyrange | 3831 | 32 | p\n > | 0 | 8747 | @- | gist\n > 6158 | anymultirange | 4537 | 32 | p\n > | 0 | 8407 | @- | gist\n > (5 rows)\n >\n > select oprcode, oprname, oprleft::regtype\n > from pg_operator opr\n > where opr.oprname in ('#','*','@-')\n > and oprleft = oprright\n > and oprleft in (603,3831,4537);\n > output:\n >\n > oprcode | oprname | oprleft\n > ----------------------------+---------+---------------\n > box_intersect | # | box\n > range_intersect | * | anyrange\n > multirange_intersect | * | anymultirange\n > range_without_portion | @- | anyrange\n > multirange_without_portion | @- | anymultirange\n > (5 rows)\n\nThis seems correct. '#' is the name of the box overlaps operator. Probably I should add a box @- \noperator too. But see below. . . 
.\n\n > should amoppurpose = 'p' is true apply to ' @-' operator?\n\nYes.\n\n > catalog-pg-amop.html:\n > `\n > amopsortfamily oid (references pg_opfamily.oid):\n > The B-tree operator family this entry sorts according to, if an\n > ordering operator; zero if a search operator\n > `\n > you should also update the above entry, the amopsortfamily is also\n > zero for \"portion operator\" for the newly implemented \"portion\n > operator\".\n\nOkay, done.\n\n > v21-0006-Add-UPDATE-DELETE-FOR-PORTION-OF.patch\n > create mode 100644 src/backend/utils/adt/period.c\n > create mode 100644 src/include/utils/period.h\n > you should put these two files to v21-0008-Add-PERIODs.patch.\n > it's not related to that patch, it also makes people easy to review.\n\nYou're right, sorry!\n\nOn 1/8/24 16:00, jian he wrote:\n >\n > +/*\n > + * ForPortionOfClause\n > + * representation of FOR PORTION OF <period-name> FROM <ts> TO <te>\n > + * or FOR PORTION OF <period-name> (<target>)\n > + */\n > +typedef struct ForPortionOfClause\n > +{\n > + NodeTag type;\n > + char *range_name;\n > + int range_name_location;\n > + Node *target;\n > + Node *target_start;\n > + Node *target_end;\n > +} ForPortionOfClause;\n >\n > \"range_name_location\" can be just \"location\"?\n > generally most of the struct put the \"location\" to the last field in the struct.\n > (that's the pattern I found all over other code)\n\nAgreed, done.\n\n > + if (isUpdate)\n > + {\n > + /*\n > + * Now make sure we update the start/end time of the record.\n > + * For a range col (r) this is `r = r * targetRange`.\n > + */\n > + Expr *rangeSetExpr;\n > + TargetEntry *tle;\n > +\n > + strat = RTIntersectStrategyNumber;\n > + GetOperatorFromCanonicalStrategy(opclass, InvalidOid, \"intersects\",\n > \"FOR PORTION OF\", &opid, &strat);\n > + rangeSetExpr = (Expr *) makeSimpleA_Expr(AEXPR_OP, get_opname(opid),\n > + (Node *) copyObject(rangeVar), targetExpr,\n > + forPortionOf->range_name_location);\n > + rangeSetExpr = (Expr *) 
transformExpr(pstate, (Node *) rangeSetExpr,\n > EXPR_KIND_UPDATE_PORTION);\n > +\n > + /* Make a TLE to set the range column */\n > + result->rangeSet = NIL;\n > + tle = makeTargetEntry(rangeSetExpr, range_attno, range_name, false);\n > + result->rangeSet = lappend(result->rangeSet, tle);\n > +\n > + /* Mark the range column as requiring update permissions */\n > + target_perminfo->updatedCols = bms_add_member(target_perminfo->updatedCols,\n > + range_attno - FirstLowInvalidHeapAttributeNumber);\n > + }\n > + else\n > + result->rangeSet = NIL;\n > I think the name \"rangeSet\" is misleading, since \"set\" is generally\n > related to a set of records.\n > but here it's more about the \"range intersect\".\n\nOkay, I can see that. I used \"rangeSet\" because we add it to the SET clause of the UPDATE command. \nHere I've changed it to rangeTargetList. I think this matches other code and better indicates what \nit holds. Any objections?\n\nIn the PERIOD patch we will need two TLEs here (that's why it's a List): one for the start column \nand one for the end column.\n\n > in ExecDelete\n > we have following code pattern:\n > ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);\n > if (processReturning && resultRelInfo->ri_projectReturning)\n > {\n > ....\n > if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,\n > SnapshotAny, slot))\n > elog(ERROR, \"failed to fetch deleted tuple for DELETE RETURNING\");\n > }\n > }\n >\n > but the ExecForPortionOfLeftovers is inside ExecDeleteEpilogue.\n > meaning even without ExecForPortionOfLeftovers, we can still call\n > table_tuple_fetch_row_version\n > also if it was *not* concurrently updated, then our current process\n > holds the lock until the ending of the transaction, i think.\n > So the following TODO is unnecessary?\n >\n > + /*\n > + * Get the range of the old pre-UPDATE/DELETE tuple,\n > + * so we can intersect it with the FOR PORTION OF target\n > + * and see if there are any 
\"leftovers\" to insert.\n > + *\n > + * We have already locked the tuple in ExecUpdate/ExecDelete\n > + * (TODO: if it was *not* concurrently updated, does\n > table_tuple_update lock the tuple itself?\n > + * I don't found the code for that yet, and maybe it depends on the AM?)\n > + * and it has passed EvalPlanQual.\n > + * Make sure we're looking at the most recent version.\n > + * Otherwise concurrent updates of the same tuple in READ COMMITTED\n > + * could insert conflicting \"leftovers\".\n > + */\n > + if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,\n > tupleid, SnapshotAny, oldtupleSlot))\n > + elog(ERROR, \"failed to fetch tuple for FOR PORTION OF\");\n\nI think you're right. According to the comments on TM_Result (returned by table_tuple_update), a \nTM_Ok indicates that the lock was acquired.\n\n > +/* ----------------------------------------------------------------\n > + * ExecForPortionOfLeftovers\n > + *\n > + * Insert tuples for the untouched timestamp of a row in a FOR\n > + * PORTION OF UPDATE/DELETE\n > + * ----------------------------------------------------------------\n > + */\n > +static void\n > +ExecForPortionOfLeftovers(ModifyTableContext *context,\n > + EState *estate,\n > + ResultRelInfo *resultRelInfo,\n > + ItemPointer tupleid)\n >\n > maybe change the comment to\n > \"Insert tuples for the not intersection of a row in a FOR PORTION OF\n > UPDATE/DELETE.\"\n\nChanged to \"untouched portion\".\n\n > + deconstruct_array(DatumGetArrayTypeP(allLeftovers),\n > typcache->type_id, typcache->typlen,\n > + typcache->typbyval, typcache->typalign, &leftovers, NULL, &nleftovers);\n > +\n > + if (nleftovers > 0)\n > + {\n > I think add something like assert nleftovers >=0 && nleftovers <= 2\n > (assume only range not multirange) would improve readability.\n\nI added the first assert. 
The second is not true for non-range types.\n\n > + <para>\n > + If the table has a range column or\n > + <link linkend=\"ddl-periods-application-periods\"><literal>PERIOD</literal></link>,\n > + you may supply a <literal>FOR PORTION OF</literal> clause, and\n > your delete will\n > + only affect rows that overlap the given interval. Furthermore, if\n > a row's span\n > + extends outside the <literal>FOR PORTION OF</literal> bounds, then\n > your delete\n > + will only change the span within those bounds. In effect you are\n > deleting any\n > + moment targeted by <literal>FOR PORTION OF</literal> and no moments outside.\n > + </para>\n > +\n > + <para>\n > + Specifically, after <productname>PostgreSQL</productname> deletes\n > the existing row,\n > + it will <literal>INSERT</literal>\n > + new rows whose range or start/end column(s) receive the remaining\n > span outside\n > + the targeted bounds, containing the original values in other columns.\n > + There will be zero to two inserted records,\n > + depending on whether the original span extended before the targeted\n > + <literal>FROM</literal>, after the targeted <literal>TO</literal>,\n > both, or neither.\n > + </para>\n > +\n > + <para>\n > + These secondary inserts fire <literal>INSERT</literal> triggers. First\n > + <literal>BEFORE DELETE</literal> triggers first, then\n > + <literal>BEFORE INSERT</literal>, then <literal>AFTER INSERT</literal>,\n > + then <literal>AFTER DELETE</literal>.\n > + </para>\n > +\n > + <para>\n > + These secondary inserts do not require <literal>INSERT</literal>\n > privilege on the table.\n > + This is because conceptually no new information has been added.\n > The inserted rows only preserve\n > + existing data about the untargeted time period. 
Note this may\n > result in users firing <literal>INSERT</literal>\n > + triggers who don't have insert privileges, so be careful about\n > <literal>SECURITY DEFINER</literal> trigger functions!\n > + </para>\n >\n > I think you need to wrap them into a big paragraph, otherwise they\n > lose the context?\n > please see the attached build sql-update.html.\n\nStill TODO.\n\n > also I think\n > + <link linkend=\"ddl-periods-application-periods\"><literal>PERIOD</literal></link>,\n > should shove into Add-PERIODs.patch.\n >\n > otherwise you cannot build Add-UPDATE-DELETE-FOR-PORTION-OF.patch\n > without all the patches.\n\nFixed.\n\n > I think the \"FOR-PORTION-OF\" feature is kind of independ?\n > Because, IMHO, \"for portion\" is a range datum interacting with another\n > single range datum, but the primary key with \"WITHOUT OVERLAPS\", is\n > range datum interacting with a set of range datums.\n > now I cannot just git apply v22-0006-Add-UPDATE-DELETE-FOR-PORTION-OF.patch.\n > That maybe would make it more difficult to get commited?\n\nStill TODO.\n\nOn 1/8/24 21:33, jian he wrote:\n >\n > src5=# select range_without_portion(numrange(1.0,3.0,'[]'),\n > numrange(1.5,2.0,'(]'));\n > range_without_portion\n > ---------------------------\n > {\"[1.0,1.5]\",\"(2.0,3.0]\"}\n > (1 row)\n >\n > src5=# \\gdesc\n > Column | Type\n > -----------------------+-----------\n > range_without_portion | numeric[]\n > (1 row)\n >\n > src5=# \\df range_without_portion\n > List of functions\n > Schema | Name | Result data type | Argument data\n > types | Type\n > ------------+-----------------------+------------------+---------------------+------\n > pg_catalog | range_without_portion | anyarray | anyrange,\n > anyrange | func\n > (1 row)\n >\n > so apparently, you cannot from (anyrange, anyrange) get anyarray the\n > element type is anyrange.\n > I cannot find the documented explanation in\n > https://www.postgresql.org/docs/current/extend-type-system.html#EXTEND-TYPES-POLYMORPHIC\n 
>\n > anyrange is POLYMORPHIC, anyarray is POLYMORPHIC,\n > but I suppose, getting an anyarray the element type is anyrange would be hard.\n\nYou're right, that is a problem.\n\nI think the right approach is to make intersect and without_portion just be support functions, not \noperators. Then I don't need to introduce the new 'p' amop strategy at all, which seemed like a \ndubious idea anyway. Then the without_portion function can return a SETOF instead of an array.\n\nAnother idea is to add more polymorphic types, anyrangearray and anymultirangearray, but maybe that \nis too big a thing. OTOH I have wanted those same types before. I will take a stab at it.\n\nOn 1/11/24 06:44, Peter Eisentraut wrote:\n > Here is some more detailed review of the first two patches. (I reviewed v20; I see you have also\n > posted v21, but they don't appear very different for this purpose.)\n >\n > v20-0001-Add-stratnum-GiST-support-function.patch\n >\n > * contrib/btree_gist/Makefile\n >\n > Needs corresponding meson.build updates.\n\nFixed.\n\n > * contrib/btree_gist/btree_gist--1.7--1.8.sql\n >\n > Should gist_stratnum_btree() live in contrib/btree_gist/ or in core?\n > Are there other extensions that use the btree strategy numbers for\n > gist?\n\nMoved. None of our other contrib extensions use it. 
I thought it would be friendly to offer it to \noutside extensions, but maybe that is too speculative.\n\n > +ALTER OPERATOR FAMILY gist_vbit_ops USING gist ADD\n > + FUNCTION 12 (varbit, varbit) gist_stratnum_btree (int2) ;\n >\n > Is there a reason for the extra space after FUNCTION here (repeated\n > throughout the file)?\n\nFixed.\n\n > +-- added in 1.4:\n >\n > What is the purpose of these \"added in\" comments?\n\nI added those to help me make sure I was including every type in the extension, but I've taken them \nout here.\n\n > v20-0002-Add-temporal-PRIMARY-KEY-and-UNIQUE-constraints.patch\n >\n > * contrib/btree_gist/Makefile\n >\n > Also update meson.build.\n\nDone.\n\n > * contrib/btree_gist/sql/without_overlaps.sql\n >\n > Maybe also insert a few values, to verify that the constraint actually\n > does something?\n\nDone.\n\n > * doc/src/sgml/ref/create_table.sgml\n >\n > Is \"must have a range type\" still true? With the changes to the\n > strategy number mapping, any type with a supported operator class\n > should work?\n\nUpdated. Probably more docs to come; I want to go through them all now that we support more types.\n\n > * src/backend/utils/adt/ruleutils.c\n >\n > Is it actually useful to add an argument to\n > decompile_column_index_array()? Wouldn't it be easier to just print\n > the \" WITHOUT OVERLAPS\" in the caller after returning from it?\n\nOkay, done.\n\n > * src/include/access/gist_private.h\n >\n > The added function gistTranslateStratnum() isn't really \"private\" to\n > gist. So access/gist.h would be a better place for it.\n\nMoved.\n\n > Also, most other functions there appear to be named \"GistSomething\",\n > so a more consistent name might be GistTranslateStratnum.\n >\n > * src/include/access/stratnum.h\n\nChanged.\n\n > The added StrategyIsValid() doesn't seem that useful? Plenty of\n > existing code just compares against InvalidStrategy, and there is only\n > one caller for the new function. 
I suggest to do without it.\n >\n > * src/include/commands/defrem.h\n\nOkay, removed.\n\n > We are using two terms here, well-known strategy number and canonical\n > strategy number, to mean the same thing (I think?). Let's try to\n > stick with one. Or explain the relationship?\n\nTrue. Changed everything to \"well-known\" which seems like a better match for what's going on.\n\nI haven't gone through jian he's Jan 13 patch yet, but since he was also implementing Peter's \nrequests I thought I should share what I have. I did this work a while ago, but I was hoping to \nfinish the TODOs above first, and then we got hit with a winter storm that knocked out power. Sorry \nto cause duplicate work!\n\nRebased to 2f35c14cfb.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Wed, 17 Jan 2024 19:59:06 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "2024-01 Commitfest.\n\nHi, This patch has a CF status of \"Needs Review\" [1], but it seems\nthere were CFbot test failures last time it was run [2]. Please have a\nlook and post an updated version if necessary.\n\n======\n[1] https://commitfest.postgresql.org/46/4308/\n[2] https://cirrus-ci.com/github/postgresql-cfbot/postgresql/commitfest/46/4308\n\nKind Regards,\nPeter Smith.\n\n\n", "msg_date": "Mon, 22 Jan 2024 16:59:35 +1100", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 18.01.24 04:59, Paul Jungwirth wrote:\n> Here are new patches consolidating feedback from several emails.\n\nI have committed 0001 and 0002 (the primary key support).\n\nThe only significant tweak I did was the error messages in \nGetOperatorFromWellKnownStrategy(), to make the messages translatable \nbetter and share wording with other messages. 
These messages are \ndifficult to reach, so we'll probably have to wait for someone to \nactually encounter them to see if they are useful.\n\nI would like to work on 0003 and 0004 (the foreign key support) during \nFebruary/March. The patches beyond that are probably too optimistic for \nPG17. I recommend you focus getting 0003/0004 in good shape soon.\n\n\n\n", "msg_date": "Wed, 24 Jan 2024 17:32:58 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 1/24/24 08:32, Peter Eisentraut wrote:\n > On 18.01.24 04:59, Paul Jungwirth wrote:\n >> Here are new patches consolidating feedback from several emails.\n >\n > I have committed 0001 and 0002 (the primary key support).\n\nThanks Peter! I noticed the comment on gist_stratnum_btree was out-of-date, so here is a tiny patch \ncorrecting it.\n\nAlso the remaining patches with some updates:\n\nI fixed the dependency issues with PERIODs and their (hidden) GENERATED range columns. This has been \ncausing test failures and bugging me since I reordered the patches at PgCon, so I'm glad to finally \nclean it up. The PERIOD should have an INTERNAL dependency on the range column, but then when you \ndropped the table the dependency code thought the whole table was part of the INTERNAL dependency, \nso the drop would fail. The PERIOD patch here fixes the dependency logic. (I guess this is the first \ntime a column has been an internal dependency of something.)\n\nI also fixed an error message when you try to change the type of a start/end column used by a \nPERIOD. Previously the error message would complain about the GENERATED column, not the PERIOD, \nwhich seems confusing. In fact it was non-deterministic, depending on which pg_depend record the \nindex returned first.\n\nOn 12/6/23 05:22, jian he wrote:\n > tring to the following TODO:\n > // TODO: Need to save context->mtstate->mt_transition_capture? 
(See\n > comment on ExecInsert)\n >\n > but failed.\n > I also attached the trial, and also added the related test.\n >\n > You can also use the test to check portion update with insert trigger\n > with \"referencing old table as old_table new table as new_table\"\n > situation.\n\nThank you for the very helpful test case here. I fixed the issue of not passing along the transition \ntable. But there is still more work to do here I think:\n\n- The AFTER INSERT FOR EACH ROW triggers have *both* leftover rows in the NEW table. Now the docs do \nsay that for AFTER triggers, a named transition table can see all the changes from the *statement* \n(although that seems pretty weird to me), but the inserts are two *separate* statements. I think the \nSQL:2011 standard is fairly clear about that. So each time the trigger fires we should still get \njust one row in the transition table.\n\n- The AFTER INSERT FOR EACH STATEMENT triggers never fire. That happens outside ExecInsert (in \nExecModifyTable). In fact there is a bunch of stuff in ExecModifyTable that maybe we need to do when \nwe insert leftovers. Do we even need a separate exec node, perhaps wrapping ExecModifyTable? I'm not \nsure that would give us the correct trigger ordering for the triggers on the implicit insert \nstatement(s) vs the explicit update/delete statement, so maybe it does all need to be part of the \nsingle node. But still I think we need to be more careful about memory, especially the per-tuple \ncontext.\n\nI'll keep working on that, but at least in this round of patches the transition tables aren't \nmissing completely.\n\nMy plan is still to replace the 'p' amoppurpose operators with just support functions. 
I want to do \nthat next, although as Peter requested I'll also start focusing more narrowly on the foreign key \npatches.\n\nRebased to 46a0cd4cef.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Wed, 24 Jan 2024 14:06:56 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 24.01.24 23:06, Paul Jungwirth wrote:\n> On 1/24/24 08:32, Peter Eisentraut wrote:\n> > On 18.01.24 04:59, Paul Jungwirth wrote:\n> >> Here are new patches consolidating feedback from several emails.\n> >\n> > I have committed 0001 and 0002 (the primary key support).\n> \n> Thanks Peter! I noticed the comment on gist_stratnum_btree was \n> out-of-date, so here is a tiny patch correcting it.\n\ncommitted that\n\n\n\n", "msg_date": "Thu, 25 Jan 2024 07:31:44 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "I fixed your tests, some of your tests can be simplified, (mainly\nprimary key constraint is unnecessary for the failed tests)\nalso your foreign key patch test table, temporal_rng is created at\nline 141, and we use it at around line 320.\nit's hard to get the definition of temporal_rng. 
I drop the table\nand recreate it.\nSo people can view the patch with tests more easily.\n\n\n+ <para>\n+ In a temporal foreign key, the delete/update will use\n+ <literal>FOR PORTION OF</literal> semantics to constrain the\n+ effect to the bounds being deleted/updated in the referenced row.\n+ </para>\n\nin v24-0003-Add-temporal-FOREIGN-KEYs.patch\n <literal>FOR PORTION OF</literal> not yet implemented, so we should\nnot mention it.\n\n+ <para>\n+ If the last column is marked with <literal>PERIOD</literal>,\n+ it must be a period or range column, and the referenced table\n+ must have a temporal primary key.\ncan we change \"it must be a period or range column\" to \"it must be a\nrange column\", maybe we can add it on another patch.", "msg_date": "Mon, 29 Jan 2024 08:00:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Mon, Jan 29, 2024 at 8:00 AM jian he <jian.universality@gmail.com> wrote:\n>\n> I fixed your tests, some of your tests can be simplified, (mainly\n> primary key constraint is unnecessary for the failed tests)\n> also your foreign key patch test table, temporal_rng is created at\n> line 141, and we use it at around line 320.\n> it's hard to get the definition of temporal_rng. I drop the table\n> and recreate it.\n> So people can view the patch with tests more easily.\n>\nI've attached a new patch that further simplified the tests. 
(scope\nv24 patch's 0002 and 0003)\nPlease ignore previous email attachments.\n\nI've only applied the v24, 0002, 0003.\nseems in doc/src/sgml/ref/create_table.sgml\nlack the explanation of `<replaceable\nclass=\"parameter\">temporal_interval</replaceable>`\n\nsince foreign key ON {UPDATE | DELETE} {CASCADE,SET NULL,SET DEFAULT}\nnot yet supported,\nv24-0003 create_table.sgml should reflect that.\n\n+ /*\n+ * For FKs with PERIOD we need an operator and aggregate function\n+ * to check whether the referencing row's range is contained\n+ * by the aggregated ranges of the referenced row(s).\n+ * For rangetypes this is fk.periodatt <@ range_agg(pk.periodatt).\n+ * FKs will look these up at \"runtime\", but we should make sure\n+ * the lookup works here.\n+ */\n+ if (is_temporal)\n+ FindFKPeriodOpersAndProcs(opclasses[numpks - 1], &periodoperoid,\n&periodprocoid);\n\nwithin the function ATAddForeignKeyConstraint, you called\nFindFKPeriodOpersAndProcs,\nbut never used the computed outputs: periodoperoid, periodprocoid,\nopclasses.\nWe validate these(periodoperoid, periodprocoid) at\nlookupTRIOperAndProc, FindFKPeriodOpersAndProcs.\nI'm not sure whether FindFKPeriodOpersAndProcs in\nATAddForeignKeyConstraint is necessary.\n\n+ * Check if all key values in OLD and NEW are \"equivalent\":\n+ * For normal FKs we check for equality.\n+ * For temporal FKs we check that the PK side is a superset of its old\nvalue,\n+ * or the FK side is a subset.\n\"or the FK side is a subset.\" is misleading, should it be something\nlike \"or the FK side is a subset of X\"?\n\n+ if (indexStruct->indisexclusion) return i - 1;\n+ else return i;\n\nI believe our style should be (with proper indent)\nif (indexStruct->indisexclusion)\nreturn i - 1;\nelse\nreturn i;\n\nin transformFkeyCheckAttrs\n+ if (found && is_temporal)\n+ {\n+ found = false;\n+ for (j = 0; j < numattrs + 1; j++)\n+ {\n+ if (periodattnum == indexStruct->indkey.values[j])\n+ {\n+ opclasses[numattrs] = indclass->values[j];\n+ 
found = true;\n+ break;\n+ }\n+ }\n+ }\n\ncan be simplified:\n{\nfound = false;\nif (periodattnum == indexStruct->indkey.values[numattrs])\n{\nopclasses[numattrs] = indclass->values[numattrs];\nfound = true;\n}\n}\n\nAlso wondering, at the end of the function transformFkeyCheckAttrs `if\n(!found)` part:\ndo we need another error message handle is_temporal is true?\n\n\n@@ -212,8 +213,11 @@ typedef struct NewConstraint\n ConstrType contype; /* CHECK or FOREIGN */\n Oid refrelid; /* PK rel, if FOREIGN */\n Oid refindid; /* OID of PK's index, if FOREIGN */\n+ bool conwithperiod; /* Whether the new FOREIGN KEY uses PERIOD */\n Oid conid; /* OID of pg_constraint entry, if FOREIGN */\n Node *qual; /* Check expr or CONSTR_FOREIGN Constraint */\n+ Oid *operoids; /* oper oids for FOREIGN KEY with PERIOD */\n+ Oid *procoids; /* proc oids for FOREIGN KEY with PERIOD */\n ExprState *qualstate; /* Execution state for CHECK expr */\n } NewConstraint;\nprimary key can only one WITHOUT OVERLAPS,\nso *operoids and *procoids\ncan be replaced with just\n`operoids, procoids`.\nAlso these two elements in struct NewConstraint not used in v24, 0002, 0003.", "msg_date": "Fri, 2 Feb 2024 13:53:52 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "I have done a review of the temporal foreign key patches in this patch\nseries (0002 and 0003, v24).\n\nThe patch set needs a rebase across c85977d8fef. I was able to do it\nmanually, but it's a bit tricky, so perhaps you can post a new set to\nhelp future reviews.\n\n(Also, the last (0007) patch has some compiler warnings and also\ncauses the pg_upgrade test to fail. I didn't check this further, but\nthat's why the cfbot is all red.)\n\nIn summary, in principle, this all looks more or less correct to me.\n\nAs a general comment, we need to figure out the right terminology\n\"period\" vs. 
\"temporal\", especially if we are going to commit these\nfeatures incrementally. But I didn't look at this too hard here yet.\n\n\n* v24-0002-Add-GiST-referencedagg-support-func.patch\n\nDo we really need this level of generality? Are there examples not\nusing ranges that would need a different aggregate function? Maybe\nsomething with geometry (points and lines)? But it seems to me that\nthen we'd also need some equivalent to \"without portion\" support for\nthose types and a multirange equivalent (basically another gist\nsupport function wrapped around the 0004 patch).\n\n\n* v24-0003-Add-temporal-FOREIGN-KEYs.patch\n\n- contrib/btree_gist/expected/without_overlaps.out\n- contrib/btree_gist/sql/without_overlaps.sql\n\ntypo \"exusts\"\n\n\n- doc/src/sgml/ref/create_table.sgml\n\nThis mentions FOR PORTION OF from a later patch.\n\nIt is not documented that SET NULL and SET DEFAULT are not supported,\neven though that is added in a later patch. (So this patch should say\nthat it's not supported, and then the later patch should remove that.)\n\n\n- src/backend/commands/indexcmds.c\n\nThe changes to GetOperatorFromWellKnownStrategy() don't work for\nmessage translations. We had discussed a similar issue for this\nfunction previously. I think it's ok to leave the function as it was.\nThe additional context could be added with location pointers or\nerrcontext() maybe, but it doesn't seem that important for now.\n\n\n- src/backend/commands/tablecmds.c\n\nThe changes in ATAddForeignKeyConstraint(), which are the meat of the\nchanges in this file, are very difficult to review in detail. I tried\ndifferent git-diff options to get a sensible view, but it wasn't\nhelpful. Do we need to do some separate refactoring here first?\n\nThe error message \"action not supported for temporal foreign keys\"\ncould be more detailed, mention the action. Look for example how the\nerror for the generated columns is phrased. 
(But note that for\ngenerated columns, the actions are impossible to support, whereas here\nit is just something not done yet. So there should probably still be\ndifferent error codes.)\n\n\n- src/backend/nodes/outfuncs.c\n- src/backend/nodes/readfuncs.c\n\nPerhaps you would like to review my patch 0001 in\n<https://www.postgresql.org/message-id/859d6155-e361-4a05-8db3-4aa1f007ff28@eisentraut.org>,\nwhich removes the custom out/read functions for the Constraint node.\nThen you could get rid of these changes.\n\n\n- src/backend/utils/adt/ri_triggers.c\n\nThe added #include \"catalog/pg_range.h\" doesn't appear to be used for\nanything.\n\nMaybe we can avoid the added #include \"commands/tablecmds.h\" by\nputting the common function in some appropriate lower-level module.\n\ntypo \"PEROID\"\n\nRenaming of ri_KeysEqual() to ri_KeysStable() doesn't improve clarity,\nI think. I think we can leave the old name and add a comment (as you\nhave done). There is a general understanding around this feature set\nthat \"equal\" sometimes means \"contained\" or something like that.\n\nThe function ri_RangeAttributeNeedsCheck() could be documented better.\nIt's bit terse and unclear. From the code, it looks like it is used\ninstead of row equality checks. Maybe a different function name would\nbe suitable.\n\nVarious unnecessary reformatting in RI_FKey_check().\n\nWhen assembling the SQL commands, you need to be very careful about\nfully quoting and schema-qualifying everything. See for example\nri_GenerateQual().\n\nHave you checked that the generated queries can use indexes and have\nsuitable performance? Do you have example execution plans maybe?\n\n\n- src/backend/utils/adt/ruleutils.c\n\nThis seems ok in principle, but it's kind of weird that the new\nargument of decompile_column_index_array() is called \"withPeriod\"\n(which seems appropriate seeing what it does), but what we are passing\nin is conwithoutoverlaps. 
Maybe we need to reconsider the naming of\nthe constraint column? Sorry, I made you change it from \"contemporal\"\nor something, didn't I? Maybe \"conperiod\" would cover both meanings\nbetter?\n\n\n- src/backend/utils/cache/lsyscache.c\n\nget_func_name_and_namespace(): This function would at least need some\nidentifier quoting. There is only one caller (lookupTRIOperAndProc),\nso let's just put this code inline there; it's not worth a separate\nglobal function. (Also, you could use psprintf() here to simplify\npalloc() + snprintf().)\n\n\n- src/include/catalog/pg_constraint.h\n\nYou are changing in several comments \"equality\" to \"comparison\". I\nsuspect you effectively mean \"equality or containment\"? Maybe\n\"comparison\" is too subtle to convey that meaning? Maybe be more\nexplicit.\n\nYou are changing a foreign key from DECLARE_ARRAY_FOREIGN_KEY to\nDECLARE_ARRAY_FOREIGN_KEY_OPT. Add a comment about it, like the one\njust above has.\n\n\n- src/include/catalog/pg_proc.dat\n\nFor the names of the trigger functions, maybe instead of\n\n TRI_FKey_check_ins\n\nsomething like\n\n RI_FKey_period_check_ins\n\nso that all RI trigger functions group under a common prefix.\n\nOn second thought, do we even need separate functions for this?\nLooking at ri_triggers.c, the temporal and non-temporal functions are\nthe same, and all the differences are handled in the underlying\nimplementation functions.\n\n\n- src/include/nodes/parsenodes.h\n\nThe constants FKCONSTR_PERIOD_OP_CONTAINED_BY and\nFKCONSTR_PERIOD_PROC_REFERENCED_AGG could use more documentation here.\n\nFor the Constraint struct, don't we just need a bool field saying\n\"this is a period FK\", and then we'd know that the last column is the\nperiod? Like we did for the primary keys (bool without_overlaps).\n\n\n- src/include/parser/kwlist.h\n\nFor this patch, the keyword PERIOD can be unreserved. But it\napparently will need to be reserved later for the patch that\nintroduces PERIOD columns. 
Maybe it would make sense to leave it\nunreserved for this patch and upgrade it in the later one.\n\n\n\n", "msg_date": "Mon, 12 Feb 2024 10:55:09 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hi\nmore minor issues.\n\n+ FindFKComparisonOperators(\n+ fkconstraint, tab, i, fkattnum,\n+ &old_check_ok, &old_pfeqop_item,\n+ pktypoid[i], fktypoid[i], opclasses[i],\n+ is_temporal, false,\n+ &pfeqoperators[i], &ppeqoperators[i], &ffeqoperators[i]);\n+ }\n+ if (is_temporal) {\n+ pkattnum[numpks] = pkperiodattnum;\n+ pktypoid[numpks] = pkperiodtypoid;\n+ fkattnum[numpks] = fkperiodattnum;\n+ fktypoid[numpks] = fkperiodtypoid;\n\n- pfeqop = get_opfamily_member(opfamily, opcintype, fktyped,\n- eqstrategy);\n- if (OidIsValid(pfeqop))\n- {\n- pfeqop_right = fktyped;\n- ffeqop = get_opfamily_member(opfamily, fktyped, fktyped,\n- eqstrategy);\n- }\n- else\n- {\n- /* keep compiler quiet */\n- pfeqop_right = InvalidOid;\n- ffeqop = InvalidOid;\n- }\n+ FindFKComparisonOperators(\n+ fkconstraint, tab, numpks, fkattnum,\n+ &old_check_ok, &old_pfeqop_item,\n+ pkperiodtypoid, fkperiodtypoid, opclasses[numpks],\n+ is_temporal, true,\n+ &pfeqoperators[numpks], &ppeqoperators[numpks], &ffeqoperators[numpks]);\n+ numfks += 1;\n+ numpks += 1;\n+ }\n\nopening curly brace should be the next line, also do you think it's\ngood idea to add following in the `if (is_temporal)` branch\n`\nAssert(OidIsValid(fkperiodtypoid) && OidIsValid(pkperiodtypoid));\nAssert(OidIsValid(pkperiodattnum > 0 && fkperiodattnum > 0));\n`\n\n` if (is_temporal)` branch, you can set the FindFKComparisonOperators\n10th argument (is_temporal)\nto true, since you are already in the ` if (is_temporal)` branch.\n\nmaybe we need some extra comments on\n`\n+ numfks += 1;\n+ numpks += 1;\n`\nsince it might not be that evident?\n\nDo you think it's a good idea to list arguments line by line (with\ngood indentation) is good 
format? like:\nFindFKComparisonOperators(fkconstraint,\ntab,\ni,\nfkattnum,\n&old_check_ok,\n&old_pfeqop_item,\npktypoid[i],\nfktypoid[i],\nopclasses[i],\nfalse,\nfalse,\n&pfeqoperators[i],\n&ppeqoperators[i],\n&ffeqoperators[i]);\n\n\n", "msg_date": "Wed, 14 Feb 2024 13:00:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hello,\n\nHere is another patch series for application time. It addresses the feedback from the last few \nemails. Details below:\n\nOn 1/28/24 16:00, jian he wrote:\n > + <para>\n > + In a temporal foreign key, the delete/update will use\n > + <literal>FOR PORTION OF</literal> semantics to constrain the\n > + effect to the bounds being deleted/updated in the referenced row.\n > + </para>\n >\n > in v24-0003-Add-temporal-FOREIGN-KEYs.patch\n > <literal>FOR PORTION OF</literal> not yet implemented, so we should\n > not mention it.\n\nFixed.\n\n > + <para>\n > + If the last column is marked with <literal>PERIOD</literal>,\n > + it must be a period or range column, and the referenced table\n > + must have a temporal primary key.\n > can we change \"it must be a period or range column\" to \"it must be a\n > range column\", maybe we can add it on another patch.\n\nRewrote this section to be clearer.\n\nOn 2/1/24 21:53, jian he wrote:\n > I've attached a new patch that further simplified the tests. (scope\n > v24 patch's 0002 and 0003)\n > Please ignore previous email attachments.\n\nThanks, I've pulled in most of these changes to the tests.\n\n > I've only applied the v24, 0002, 0003.\n > seems in doc/src/sgml/ref/create_table.sgml\n > lack the explanation of `<replaceable\n > class=\"parameter\">temporal_interval</replaceable>`\n\nYou're right. 
Actually I think it is clearer without adding a separate name here, so I've updated \nthe docs to use `column_name | period_name`.\n\n > since foreign key ON {UPDATE | DELETE} {CASCADE,SET NULL,SET DEFAULT}\n > not yet supported,\n > v24-0003 create_table.sgml should reflect that.\n\nUpdated.\n\n > within the function ATAddForeignKeyConstraint, you called\n > FindFKPeriodOpersAndProcs,\n > but never used the computed outputs: periodoperoid, periodprocoid, opclasses.\n > We validate these(periodoperoid, periodprocoid) at\n > lookupTRIOperAndProc, FindFKPeriodOpersAndProcs.\n > I'm not sure whether FindFKPeriodOpersAndProcs in\n > ATAddForeignKeyConstraint is necessary.\n\nThis is explained in the comment above: we will do the same lookup when the foreign key is checked, \nbut we should make sure it works now so we can report the problem to the user.\n\n > + * Check if all key values in OLD and NEW are \"equivalent\":\n > + * For normal FKs we check for equality.\n > + * For temporal FKs we check that the PK side is a superset of its old value,\n > + * or the FK side is a subset.\n > \"or the FK side is a subset.\" is misleading, should it be something\n > like \"or the FK side is a subset of X\"?\n\nOkay, changed.\n\n > + if (indexStruct->indisexclusion) return i - 1;\n > + else return i;\n >\n > I believe our style should be (with proper indent)\n > if (indexStruct->indisexclusion)\n > return i - 1;\n > else\n > return i;\n\nFixed.\n\n > in transformFkeyCheckAttrs\n > + if (found && is_temporal)\n > + {\n > + found = false;\n > + for (j = 0; j < numattrs + 1; j++)\n > + {\n > + if (periodattnum == indexStruct->indkey.values[j])\n > + {\n > + opclasses[numattrs] = indclass->values[j];\n > + found = true;\n > + break;\n > + }\n > + }\n > + }\n >\n > can be simplified:\n > {\n > found = false;\n > if (periodattnum == indexStruct->indkey.values[numattrs])\n > {\n > opclasses[numattrs] = indclass->values[numattrs];\n > found = true;\n > }\n > }\n\nChanged.\n\n > Also 
wondering, at the end of the function transformFkeyCheckAttrs `if\n > (!found)` part:\n > do we need another error message handle is_temporal is true?\n\nI think the existing error message works well for both temporal and non-temporal cases.\n\n > @@ -212,8 +213,11 @@ typedef struct NewConstraint\n > ConstrType contype; /* CHECK or FOREIGN */\n > Oid refrelid; /* PK rel, if FOREIGN */\n > Oid refindid; /* OID of PK's index, if FOREIGN */\n > + bool conwithperiod; /* Whether the new FOREIGN KEY uses PERIOD */\n > Oid conid; /* OID of pg_constraint entry, if FOREIGN */\n > Node *qual; /* Check expr or CONSTR_FOREIGN Constraint */\n > + Oid *operoids; /* oper oids for FOREIGN KEY with PERIOD */\n > + Oid *procoids; /* proc oids for FOREIGN KEY with PERIOD */\n > ExprState *qualstate; /* Execution state for CHECK expr */\n > } NewConstraint;\n > primary key can only one WITHOUT OVERLAPS,\n > so *operoids and *procoids\n > can be replaced with just\n > `operoids, procoids`.\n > Also these two elements in struct NewConstraint not used in v24, 0002, 0003.\n\nI've removed these entirely. Sorry, they were leftover from an earlier revision.\n\nOn 2/12/24 01:55, Peter Eisentraut wrote:\n > (Also, the last (0007) patch has some compiler warnings and also\n > causes the pg_upgrade test to fail. I didn't check this further, but\n > that's why the cfbot is all red.)\n\nFixed the pg_upgrade problem. I'm not seeing compiler warnings. If they still exist can you point me \nto those?\n\n > As a general comment, we need to figure out the right terminology\n > \"period\" vs. \"temporal\", especially if we are going to commit these\n > features incrementally. But I didn't look at this too hard here yet.\n\nAgreed. I think it is okay to use \"temporal\" in the docs for the feature in general, if we clarify \nthat non-temporal values are also supported. 
That is what the rest of the world calls this kind of \nthing.\n\nThe word \"period\" is confusing because it can be the `PERIOD` keyword used in temporal FKs, or also \nthe SQL:2011 `PERIOD` object that is like our range types. And then we also have ranges, etc. In the \npast I was using \"interval\" to mean \"range or PERIOD\" (and \"interval\" is used by Date in his \ntemporal book), but perhaps that is too idiosyncratic. I've removed \"interval\" from the FK docs, and \ninstead I've tried to be very explicit and avoid ambiguity. (I haven't given as much attention to \ncleaning up the later patches' docs yet.)\n\n > * v24-0002-Add-GiST-referencedagg-support-func.patch\n >\n > Do we really need this level of generality? Are there examples not\n > using ranges that would need a different aggregate function? Maybe\n > something with geometry (points and lines)? But it seems to me that\n > then we'd also need some equivalent to \"without portion\" support for\n > those types and a multirange equivalent (basically another gist\n > support function wrapped around the 0004 patch).\n\nI'm not sure how else to do it. The issue is that `range_agg` returns a multirange, so the result \ntype doesn't match the inputs. But other types will likely have the same problem: to combine boxes \nyou may need a multibox. The combine mdranges you may need a multimdrange.\n\nI agree we need something to support \"without portion\" too. The patches here give implementations \nfor ranges and multiranges. But that is for `FOR PORTION OF`, so it comes after the foreign key \npatches (part 5 here).\n\nBtw that part changed a bit since v24 because as jian he pointed out, our type system doesn't \nsupport anyrange inputs and an anyrange[] output. So I changed the support funcs to use SETOF. I \ncould alternately add anyrangearray and anymultirangearray pseudotypes. It's not the first time I've \nwanted those, so I'd be happy to go that way if folks are open to it. 
It seems like it should be a \ntotally separate patch though.\n\n > * v24-0003-Add-temporal-FOREIGN-KEYs.patch\n >\n > - contrib/btree_gist/expected/without_overlaps.out\n > - contrib/btree_gist/sql/without_overlaps.sql\n >\n > typo \"exusts\"\n\nFixed.\n\n > - doc/src/sgml/ref/create_table.sgml\n >\n > This mentions FOR PORTION OF from a later patch.\n >\n > It is not documented that SET NULL and SET DEFAULT are not supported,\n > even though that is added in a later patch. (So this patch should say\n > that it's not supported, and then the later patch should remove that.)\n\nAll fixed.\n\n > - src/backend/commands/indexcmds.c\n >\n > The changes to GetOperatorFromWellKnownStrategy() don't work for\n > message translations. We had discussed a similar issue for this\n > function previously. I think it's ok to leave the function as it was.\n > The additional context could be added with location pointers or\n > errcontext() maybe, but it doesn't seem that important for now.\n\nOkay I've tried a different approach here that should fit better with t9n. Let me know if it still \nneeds work.\n\n > - src/backend/commands/tablecmds.c\n >\n > The changes in ATAddForeignKeyConstraint(), which are the meat of the\n > changes in this file, are very difficult to review in detail. I tried\n > different git-diff options to get a sensible view, but it wasn't\n > helpful. Do we need to do some separate refactoring here first?\n\nI moved the FindFKComparisonOperators refactor into a separate patch, and that seems to confuse git \nless. Your suggestion to group the PERIOD attribute with the others (below) also helped a lot to cut \ndown the diff here. In fact it means I only call FindFKComparisonOperators once, so pulling it into \na separate method is not even necessary anymore. But I do think it helps simplify what's already a \nvery long function, so I've left it in. 
Let me know if more work is needed here.\n\n > The error message \"action not supported for temporal foreign keys\"\n > could be more detailed, mention the action. Look for example how the\n > error for the generated columns is phrased. (But note that for\n > generated columns, the actions are impossible to support, whereas here\n > it is just something not done yet. So there should probably still be\n > different error codes.)\n\nFixed.\n\n > - src/backend/nodes/outfuncs.c\n > - src/backend/nodes/readfuncs.c\n >\n > Perhaps you would like to review my patch 0001 in\n > <https://www.postgresql.org/message-id/859d6155-e361-4a05-8db3-4aa1f007ff28@eisentraut.org>,\n > which removes the custom out/read functions for the Constraint node.\n > Then you could get rid of these changes.\n\nThat is a nice improvement!\n\n > - src/backend/utils/adt/ri_triggers.c\n >\n > The added #include \"catalog/pg_range.h\" doesn't appear to be used for\n > anything.\n\nRemoved.\n\n > Maybe we can avoid the added #include \"commands/tablecmds.h\" by\n > putting the common function in some appropriate lower-level module.\n\nMoved to pg_constraint.{c,h}.\n\n > typo \"PEROID\"\n\nFixed.\n\n > Renaming of ri_KeysEqual() to ri_KeysStable() doesn't improve clarity,\n > I think. I think we can leave the old name and add a comment (as you\n > have done). There is a general understanding around this feature set\n > that \"equal\" sometimes means \"contained\" or something like that.\n\nOkay.\n\n > The function ri_RangeAttributeNeedsCheck() could be documented better.\n > It's bit terse and unclear. From the code, it looks like it is used\n > instead of row equality checks. 
Maybe a different function name would\n > be suitable.\n\nI realized I could simplify this a lot and reuse ri_AttributesEqual, so the whole method is gone now.\n\n > Various unnecessary reformatting in RI_FKey_check().\n\nFixed, sorry about that.\n\n > When assembling the SQL commands, you need to be very careful about\n > fully quoting and schema-qualifying everything. See for example\n > ri_GenerateQual().\n\nWent through everything and added quoting & schemes to a few places that were missing it.\n\n > Have you checked that the generated queries can use indexes and have\n > suitable performance? Do you have example execution plans maybe?\n\nThe plans look good to me. Here are some tests:\n\n-- test when inserting/updating the FK side:\n\nregression=# explain analyze select 1\nfrom (\nselect valid_at as r\nfrom only temporal_rng x\nwhere id = '[8,8]'\nand valid_at && '[2010-01-01,2012-01-01)'\nfor key share of x\n) x1\nhaving '[2010-01-01,2012-01-01)'::tsrange <@ range_agg(x1.r);\n QUERY PLAN \n\n---------------------------------------------------------------------------------------------------------------------------------------------------\n Aggregate (cost=8.19..8.20 rows=1 width=4) (actual time=0.165..0.167 rows=0 loops=1)\n Filter: ('[\"2010-01-01 00:00:00\",\"2012-01-01 00:00:00\")'::tsrange <@ range_agg(x1.r))\n Rows Removed by Filter: 1\n -> Subquery Scan on x1 (cost=0.14..8.18 rows=1 width=32) (actual time=0.152..0.153 rows=0 loops=1)\n -> LockRows (cost=0.14..8.17 rows=1 width=38) (actual time=0.151..0.151 rows=0 loops=1)\n -> Index Scan using temporal_rng_pk on temporal_rng x (cost=0.14..8.16 rows=1 \nwidth=38) (actual time=0.150..0.150 rows=0 loops=1)\n Index Cond: ((id = '[8,9)'::int4range) AND (valid_at && '[\"2010-01-01 \n00:00:00\",\"2012-01-01 00:00:00\")'::tsrange))\n Planning Time: 0.369 ms\n Execution Time: 0.289 ms\n(9 rows)\n\n-- test when deleting/updating from the PK side:\n\nregression=# explain analyze select 1 from only temporal_rng x 
where id = '[8,8]' and valid_at && \n'[2010-01-01,2012-01-01)'\nfor key share of x;\n QUERY PLAN \n\n---------------------------------------------------------------------------------------------------------------------------------------\n LockRows (cost=0.14..8.17 rows=1 width=10) (actual time=0.079..0.079 rows=0 loops=1)\n -> Index Scan using temporal_rng_pk on temporal_rng x (cost=0.14..8.16 rows=1 width=10) \n(actual time=0.078..0.078 rows=0 loops=1)\n Index Cond: ((id = '[8,9)'::int4range) AND (valid_at && '[\"2010-01-01 \n00:00:00\",\"2012-01-01 00:00:00\")'::tsrange))\n Planning Time: 0.249 ms\n Execution Time: 0.123 ms\n(5 rows)\n\nI will do some further tests with more rows, but I haven't yet.\n\n > - src/backend/utils/adt/ruleutils.c\n >\n > This seems ok in principle, but it's kind of weird that the new\n > argument of decompile_column_index_array() is called \"withPeriod\"\n > (which seems appropriate seeing what it does), but what we are passing\n > in is conwithoutoverlaps. Maybe we need to reconsider the naming of\n > the constraint column? Sorry, I made you change it from \"contemporal\"\n > or something, didn't I? Maybe \"conperiod\" would cover both meanings\n > better?\n\nCertainly conperiod is easier to read. Since we are using it for PK/UNIQUE/FKs, conperiod also seems \nlike a better match. FKs don't use WITHOUT OVERLAPS syntax, and OTOH PK/UNIQUEs will still accept a \nPERIOD (eventually, also a range/etc now). I've renamed it, but since the old name was already \ncommitted with the PK patch, I've broken the renaming into a separate patch that could be committed \nwithout anything else.\n\n > - src/backend/utils/cache/lsyscache.c\n >\n > get_func_name_and_namespace(): This function would at least need some\n > identifier quoting. There is only one caller (lookupTRIOperAndProc),\n > so let's just put this code inline there; it's not worth a separate\n > global function. 
(Also, you could use psprintf() here to simplify\n > palloc() + snprintf().)\n\nRemoved.\n\n > - src/include/catalog/pg_constraint.h\n >\n > You are changing in several comments \"equality\" to \"comparison\". I\n > suspect you effectively mean \"equality or containment\"? Maybe\n > \"comparison\" is too subtle to convey that meaning? Maybe be more\n > explicit.\n\nOkay, changed.\n\n > You are changing a foreign key from DECLARE_ARRAY_FOREIGN_KEY to\n > DECLARE_ARRAY_FOREIGN_KEY_OPT. Add a comment about it, like the one\n > just above has.\n\nI don't need this change at all now that we're using GENERATED columns for PERIODs, so I've taken it \nout.\n\n > - src/include/catalog/pg_proc.dat\n >\n > For the names of the trigger functions, maybe instead of\n >\n > TRI_FKey_check_ins\n >\n > something like\n >\n > RI_FKey_period_check_ins\n >\n > so that all RI trigger functions group under a common prefix.\n\nRenamed.\n\n > On second thought, do we even need separate functions for this?\n > Looking at ri_triggers.c, the temporal and non-temporal functions are\n > the same, and all the differences are handled in the underlying\n > implementation functions.\n\nMy thinking was to avoid making the non-temporal functions suffer in performance and complexity. \nWhat do you think? I've kept the separate functions here but I can combine them if you like.\n\n > - src/include/nodes/parsenodes.h\n >\n > The constants FKCONSTR_PERIOD_OP_CONTAINED_BY and\n > FKCONSTR_PERIOD_PROC_REFERENCED_AGG could use more documentation here.\n\nRemoved. They are obsolete now (and were already in v24---sorry!).\n\n > For the Constraint struct, don't we just need a bool field saying\n > \"this is a period FK\", and then we'd know that the last column is the\n > period? Like we did for the primary keys (bool without_overlaps).\n\nOkay, changed. 
Also in ATExecAddConstraint we can treat the PERIOD element like any other FK \nelement, which simplifies the changes there a lot.\n\n > - src/include/parser/kwlist.h\n >\n > For this patch, the keyword PERIOD can be unreserved. But it\n > apparently will need to be reserved later for the patch that\n > introduces PERIOD columns. Maybe it would make sense to leave it\n > unreserved for this patch and upgrade it in the later one.\n\nI tried doing this but got a shift/reduce conflict, so it's still reserved here.\n\nThanks,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Thu, 29 Feb 2024 13:16:56 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 2/13/24 21:00, jian he wrote:\n> Hi\n> more minor issues.\n> \n> + FindFKComparisonOperators(\n> + fkconstraint, tab, i, fkattnum,\n> + &old_check_ok, &old_pfeqop_item,\n> + pktypoid[i], fktypoid[i], opclasses[i],\n> + is_temporal, false,\n> + &pfeqoperators[i], &ppeqoperators[i], &ffeqoperators[i]);\n> + }\n> + if (is_temporal) {\n> + pkattnum[numpks] = pkperiodattnum;\n> + pktypoid[numpks] = pkperiodtypoid;\n> + fkattnum[numpks] = fkperiodattnum;\n> + fktypoid[numpks] = fkperiodtypoid;\n> \n> - pfeqop = get_opfamily_member(opfamily, opcintype, fktyped,\n> - eqstrategy);\n> - if (OidIsValid(pfeqop))\n> - {\n> - pfeqop_right = fktyped;\n> - ffeqop = get_opfamily_member(opfamily, fktyped, fktyped,\n> - eqstrategy);\n> - }\n> - else\n> - {\n> - /* keep compiler quiet */\n> - pfeqop_right = InvalidOid;\n> - ffeqop = InvalidOid;\n> - }\n> + FindFKComparisonOperators(\n> + fkconstraint, tab, numpks, fkattnum,\n> + &old_check_ok, &old_pfeqop_item,\n> + pkperiodtypoid, fkperiodtypoid, opclasses[numpks],\n> + is_temporal, true,\n> + &pfeqoperators[numpks], &ppeqoperators[numpks], &ffeqoperators[numpks]);\n> + numfks += 1;\n> + numpks += 1;\n> + }\n> \n> opening curly brace should be the next line,\n\nFixed in 
v25 (submitted in my other email).\n\n> also do you think it's\n> good idea to add following in the `if (is_temporal)` branch\n> `\n> Assert(OidIsValid(fkperiodtypoid) && OidIsValid(pkperiodtypoid));\n> Assert(OidIsValid(pkperiodattnum > 0 && fkperiodattnum > 0));\n> `\n> \n> ` if (is_temporal)` branch, you can set the FindFKComparisonOperators\n> 10th argument (is_temporal)\n> to true, since you are already in the ` if (is_temporal)` branch.\n> \n> maybe we need some extra comments on\n> `\n> + numfks += 1;\n> + numpks += 1;\n> `\n> since it might not be that evident?\n\nThat branch doesn't exist anymore. Same with the increments.\n\n> Do you think it's a good idea to list arguments line by line (with\n> good indentation) is good format? like:\n> FindFKComparisonOperators(fkconstraint,\n> tab,\n> i,\n> fkattnum,\n> &old_check_ok,\n> &old_pfeqop_item,\n> pktypoid[i],\n> fktypoid[i],\n> opclasses[i],\n> false,\n> false,\n> &pfeqoperators[i],\n> &ppeqoperators[i],\n> &ffeqoperators[i]);\n\nThere are places we do that, but most code I've seen tries to fill the line. I haven't followed that \nstrictly here, but I'm trying to get better at doing what pg_indent wants.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Thu, 29 Feb 2024 14:10:49 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 2/29/24 13:16, Paul Jungwirth wrote:\n> Hello,\n> \n> Here is another patch series for application time.\nHere is a v26 patch series to fix a cfbot failure in sepgsql. 
Rebased to 655dc31046.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Fri, 1 Mar 2024 12:38:27 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 3/1/24 12:38, Paul Jungwirth wrote:\n> On 2/29/24 13:16, Paul Jungwirth wrote:\n> Here is a v26 patch series to fix a cfbot failure in sepgsql. Rebased to 655dc31046.\n\nv27 attached, fixing some cfbot failures from headerscheck+cpluspluscheck. Sorry for the noise!\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Fri, 1 Mar 2024 13:56:16 -0800", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 01.03.24 22:56, Paul Jungwirth wrote:\n> On 3/1/24 12:38, Paul Jungwirth wrote:\n>> On 2/29/24 13:16, Paul Jungwirth wrote:\n>> Here is a v26 patch series to fix a cfbot failure in sepgsql. Rebased \n>> to 655dc31046.\n> \n> v27 attached, fixing some cfbot failures from \n> headerscheck+cpluspluscheck. Sorry for the noise!\n\nI had committed v27-0001-Rename-conwithoutoverlaps-to-conperiod.patch a \nlittle while ago.\n\nI have reviewed v27-0002 through 0004 now. I have one semantic question \nbelow, and there are a few places where more clarification of the \ninterfaces could help. Other than that, I think this is pretty good.\n\nAttached is a small patch that changes the PERIOD keyword to unreserved \nfor this patch. You had said earlier that this didn't work for you. \nThe attached patch works for me when applied on top of 0003.\n\n\n* v27-0002-Add-GiST-referencedagg-support-func.patch\n\nYou wrote:\n\n > I'm not sure how else to do it. The issue is that `range_agg` returns \n > a multirange, so the result\n > type doesn't match the inputs. But other types will likely have the\n > same problem: to combine boxes\n > you may need a multibox. 
The combine mdranges you may need a\n > multimdrange.\n\nCan we just hardcode the use of range_agg for this release? Might be \neasier. I don't see all this generality being useful in the near future.\n\n > Btw that part changed a bit since v24 because as jian he pointed out, \n > our type system doesn't\n > support anyrange inputs and an anyrange[] output. So I changed the\n > support funcs to use SETOF.\n\nI didn't see any SETOF stuff in the patch, or I didn't know where to look.\n\nI'm not sure I follow all the details here. So more explanations of any \nkind could be helpful.\n\n\n* v27-0003-Refactor-FK-operator-lookup.patch\n\nI suggest to skip this refactoring patch. I don't think the way this is \nsliced up is all that great, and it doesn't actually help with the \nsubsequent patches.\n\n\n* v27-0004-Add-temporal-FOREIGN-KEYs.patch\n\n- src/backend/catalog/pg_constraint.c\n\nFindFKPeriodOpersAndProcs() could use a bit more top-level\ndocumentation. Where does the input opclass come from? What are the\nthree output values? What is the business with \"symmetric types\"?\n\n- src/backend/commands/indexcmds.c\n\nGetOperatorFromWellKnownStrategy() is apparently changed to accept\nInvalidOid for rhstype, but the meaning of this is not explained in\nthe function header. It's also not clear to me why an existing caller\nis changed. This should be explained more thoroughly.\n\n- src/backend/commands/tablecmds.c\n\nis_temporal and similar should be renamed to with_period or similar \nthroughout this patch.\n\nIn transformFkeyGetPrimaryKey():\n\n * Now build the list of PK attributes from the indkey definition (we\n- * assume a primary key cannot have expressional elements)\n+ * assume a primary key cannot have expressional elements, unless it\n+ * has a PERIOD)\n\nI think the original statement is still true even with PERIOD. The \nexpressional elements refer to expression indexes. 
I don't think we can \nhave a PERIOD marker on an expression?\n\n- src/backend/utils/adt/ri_triggers.c\n\nPlease remove the separate trigger functions for the period case. They \nare the same as the non-period ones, so we don't need separate ones. \nThe difference is handled lower in the call stack, which I think is a \ngood setup. Removing the separate functions also removes a lot of extra \ncode in other parts of the patch.\n\n- src/include/catalog/pg_constraint.h\n\nShould also update catalogs.sgml accordingly.\n\n- src/test/regress/expected/without_overlaps.out\n- src/test/regress/sql/without_overlaps.sql\n\nA few general comments on the tests:\n\n- In the INSERT commands, specify the column names explicitly. This \nmakes the tests easier to read (especially since the column order \nbetween the PK and the FK table is sometimes different).\n\n- Let's try to make it so that the inserted literals match the values \nshown in the various error messages, so it's easier to match them up. \nSo, change the int4range literals to half-open notation. And also maybe \nchange the date output format to ISO.\n\n- In various comments, instead of test FK \"child\", maybe use \n\"referencing table\"? Instead of \"parent\", use \"referenced table\" (or \nprimary key table). When I read child and parent I was looking for \ninheritance.\n\n- Consider truncating the test tables before each major block of tests \nand refilling them with fresh data. So it's easier to eyeball the \ntests. 
Otherwise, there is too much dependency on what earlier tests \nleft behind.\n\nA specific question:\n\nIn this test, a PERIOD marker on the referenced site is automatically \ninferred from the primary key:\n\n+-- with inferred PK on the referenced table:\n+CREATE TABLE temporal_fk_rng2rng (\n+ id int4range,\n+ valid_at tsrange,\n+ parent_id int4range,\n+ CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT \nOVERLAPS),\n+ CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD \nvalid_at)\n+ REFERENCES temporal_rng\n+);\n\nIn your patch, this succeeds. According to the SQL standard, it should \nnot. In subclause 11.8, syntax rule 4b:\n\n\"\"\"\nOtherwise, the table descriptor of the referenced table shall include a \nunique constraint UC that specifies PRIMARY KEY. The table constraint \ndescriptor of UC shall not include an application time period name.\n\"\"\"\n\nSo this case is apparently explicitly ruled out.\n\n(It might be ok to make an extension here, but then we should be \nexplicit about it.)", "msg_date": "Mon, 11 Mar 2024 08:46:16 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "+ <para>\n+ If the last column is marked with <literal>PERIOD</literal>,\n+ it is treated in a special way.\n+ While the non-<literal>PERIOD</literal> columns are treated normally\n+ (and there must be at least one of them),\n+ the <literal>PERIOD</literal> column is not compared for equality.\n+ Instead the constraint is considered satisfied\n+ if the referenced table has matching records\n+ (based on the non-<literal>PERIOD</literal> parts of the key)\n+ whose combined <literal>PERIOD</literal> values completely cover\n+ the referencing record's.\n+ In other words, the reference must have a referent for its\nentire duration.\n+ Normally this column would be a range or multirange type,\n+ although any type whose GiST opclass has a \"contained by\" 
operator\n+ and a <literal>referenced_agg</literal> support function is allowed.\n+ (See <xref linkend=\"gist-extensibility\"/>.)\n+ In addition the referenced table must have a primary key\n+ or unique constraint declared with <literal>WITHOUT PORTION</literal>.\n+ </para>\n\ntypo \"referenced_agg\", in the gist-extensibility.html page is \"referencedagg\"\n<literal>WITHOUT PORTION</literal> should be <literal>WITHOUT OVERLAPS</literal>\n\n+ While the non-<literal>PERIOD</literal> columns are treated normally\n+ (and there must be at least one of them),\n+ the <literal>PERIOD</literal> column is not compared for equality.\nthe above sentence didn't say what is \"normally\"?\nmaybe we can do the following:\n+ While the non-<literal>PERIOD</literal> columns are treated\n+ normally for equality\n+ (and there must be at least one of them),\n+ the <literal>PERIOD</literal> column is not compared for equality.\n\n\n\n+<programlisting>\n+Datum\n+my_range_agg_transfn(PG_FUNCTION_ARGS)\n+{\n+ MemoryContext aggContext;\n+ Oid rngtypoid;\n+ ArrayBuildState *state;\n+\n+ if (!AggCheckCallContext(fcinfo, &amp;aggContext))\n+ elog(ERROR, \"range_agg_transfn called in non-aggregate context\");\n+\n+ rngtypoid = get_fn_expr_argtype(fcinfo-&gt;flinfo, 1);\n+ if (!type_is_range(rngtypoid))\n+ elog(ERROR, \"range_agg must be called with a range\");\n+\n+ if (PG_ARGISNULL(0))\n+ state = initArrayResult(rngtypoid, aggContext, false);\n+ else\n+ state = (ArrayBuildState *) PG_GETARG_POINTER(0);\n+\n+ /* skip NULLs */\n+ if (!PG_ARGISNULL(1))\n+ accumArrayResult(state, PG_GETARG_DATUM(1), false, rngtypoid,\naggContext);\n+\n+ PG_RETURN_POINTER(state);\n+}\n+\n+Datum\n+my_range_agg_finalfn(PG_FUNCTION_ARGS)\n+{\n+ MemoryContext aggContext;\n+ Oid mltrngtypoid;\n+ TypeCacheEntry *typcache;\n+ ArrayBuildState *state;\n+ int32 range_count;\n+ RangeType **ranges;\n+ int i;\n+\n+ if (!AggCheckCallContext(fcinfo, &amp;aggContext))\n+ elog(ERROR, \"range_agg_finalfn called in non-aggregate 
context\");\n+\n+ state = PG_ARGISNULL(0) ? NULL : (ArrayBuildState *) PG_GETARG_POINTER(0);\n+ if (state == NULL)\n+ /* This shouldn't be possible, but just in case.... */\n+ PG_RETURN_NULL();\n+\n+ /* Also return NULL if we had zero inputs, like other aggregates */\n+ range_count = state-&gt;nelems;\n+ if (range_count == 0)\n+ PG_RETURN_NULL();\n+\n+ mltrngtypoid = get_fn_expr_rettype(fcinfo-&gt;flinfo);\n+ typcache = multirange_get_typcache(fcinfo, mltrngtypoid);\n+\n+ ranges = palloc0(range_count * sizeof(RangeType *));\n+ for (i = 0; i &lt; range_count; i++)\n+ ranges[i] = DatumGetRangeTypeP(state-&gt;dvalues[i]);\n+\n+ PG_RETURN_MULTIRANGE_P(make_multirange(mltrngtypoid,\ntypcache-&gt;rngtype, range_count, ranges));\n+}\n\nmy_range_agg_transfn error message is inconsistent?\n `elog(ERROR, \"range_agg_transfn called in non-aggregate context\");`\n`elog(ERROR, \"range_agg must be called with a range\");`\nmaybe just `my_range_agg_transfn`, instead of mention\n{range_agg_transfn|range_agg}\nsimilarly my_range_agg_finalfn error is also inconsistent.\n\nmy_range_agg_finalfn need `type_is_multirange(mltrngtypoid)`?\n\n\n", "msg_date": "Tue, 12 Mar 2024 10:45:21 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Mon, Mar 11, 2024 at 3:46 PM Peter Eisentraut <peter@eisentraut.org> wrote:\n>\n> A few general comments on the tests:\n>\n> - In the INSERT commands, specify the column names explicitly. This\n> makes the tests easier to read (especially since the column order\n> between the PK and the FK table is sometimes different).\n>\n> - Let's try to make it so that the inserted literals match the values\n> shown in the various error messages, so it's easier to match them up.\n> So, change the int4range literals to half-open notation. 
And also maybe\n> change the date output format to ISO.\n>\nmaybe just change the tsrange type to daterange, then the dot out file\nwill be far less verbose.\n\nminor issues while reviewing v27, 0001 to 0004.\ntransformFkeyGetPrimaryKey comments need to update,\nsince bool pk_period also returned.\n\n+/*\n+ * FindFKComparisonOperators -\n+ *\n+ * Gets the operators for pfeqopOut, ppeqopOut, and ffeqopOut.\n+ * Sets old_check_ok if we can avoid re-validating the constraint.\n+ * Sets old_pfeqop_item to the old pfeqop values.\n+ */\n+static void\n+FindFKComparisonOperators(Constraint *fkconstraint,\n+ AlteredTableInfo *tab,\n+ int i,\n+ int16 *fkattnum,\n+ bool *old_check_ok,\n+ ListCell **old_pfeqop_item,\n+ Oid pktype, Oid fktype, Oid opclass,\n+ Oid *pfeqopOut, Oid *ppeqopOut, Oid *ffeqopOut)\n\nI think the above comments is\n`Sets the operators for pfeqopOut, ppeqopOut, and ffeqopOut.`.\n\n\n+ if (is_temporal)\n+ {\n+ if (!fkconstraint->fk_with_period)\n+ ereport(ERROR,\n+ (errcode(ERRCODE_INVALID_FOREIGN_KEY),\n+ errmsg(\"foreign key uses PERIOD on the referenced table but not the\nreferencing table\")));\n+ }\ncan be\nif (is_temporal && !fkconstraint->fk_with_period)\nereport(ERROR,\n(errcode(ERRCODE_INVALID_FOREIGN_KEY),\nerrmsg(\"foreign key uses PERIOD on the referenced table but not the\nreferencing table\")));\n\n+\n+ if (is_temporal)\n+ {\n+ if (!fkconstraint->pk_with_period)\n+ /* Since we got pk_attrs, one should be a period. */\n+ ereport(ERROR,\n+ (errcode(ERRCODE_INVALID_FOREIGN_KEY),\n+ errmsg(\"foreign key uses PERIOD on the referencing table but not the\nreferenced table\")));\n+ }\ncan be\nif (is_temporal && !fkconstraint->pk_with_period)\n/* Since we got pk_attrs, one should be a period. 
*/\nereport(ERROR,\n(errcode(ERRCODE_INVALID_FOREIGN_KEY),\nerrmsg(\"foreign key uses PERIOD on the referencing table but not the\nreferenced table\")));\n\nrefactor decompile_column_index_array seems unnecessary.\nPeter already mentioned it at [1], I have tried to fix it at [2].\n\n\n@@ -12141,7 +12245,8 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid\n*indexOid,\n /*\n * Now build the list of PK attributes from the indkey definition (we\n- * assume a primary key cannot have expressional elements)\n+ * assume a primary key cannot have expressional elements, unless it\n+ * has a PERIOD)\n */\n *attnamelist = NIL;\n for (i = 0; i < indexStruct->indnkeyatts; i++)\n@@ -12155,6 +12260,8 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid\n*indexOid,\n makeString(pstrdup(NameStr(*attnumAttName(pkrel, pkattno)))));\n }\n+ *pk_period = (indexStruct->indisexclusion);\n\nI don't understand the \"expression elements\" in the comments, most of\nthe tests case is like\n`\nPRIMARY KEY (id, valid_at WITHOUT OVERLAPS)\n`\n+ *pk_period = (indexStruct->indisexclusion);\ncan be\n`+ *pk_period = indexStruct->indisexclusion;`\n\n\n[1] https://postgr.es/m/7be8724a-5c25-46d7-8325-1bd8be6fa523@eisentraut.org\n[2] https://postgr.es/m/CACJufxHVg65raNhG2zBwXgjrD6jqace4NZbePyMhP8-_Q=iT8w@mail.gmail.com\n\n\n", "msg_date": "Tue, 12 Mar 2024 10:47:02 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "in GetOperatorFromWellKnownStrategy:\n*strat = GistTranslateStratnum(opclass, instrat);\nif (*strat == InvalidStrategy)\n{\nHeapTuple tuple;\ntuple = SearchSysCache1(CLAOID, ObjectIdGetDatum(opclass));\nif (!HeapTupleIsValid(tuple))\nelog(ERROR, \"cache lookup failed for operator class %u\", opclass);\nereport(ERROR,\nerrcode(ERRCODE_UNDEFINED_OBJECT),\nerrmsg(errstr, format_type_be(opcintype)),\nerrdetail(\"Could not translate strategy number %d for operator class\n\\\"%s\\\" for access method 
\\\"%s\\\".\",\n instrat, NameStr(((Form_pg_opclass) GETSTRUCT(tuple))->opcname), \"gist\"));\nReleaseSysCache(tuple);\n}\n\nlast `ReleaseSysCache(tuple);` is unreachable?\n\n\n@@ -118,12 +120,17 @@ typedef struct RI_ConstraintInfo\n int16 confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on\n * delete */\n char confmatchtype; /* foreign key's match type */\n+ bool temporal; /* if the foreign key is temporal */\n int nkeys; /* number of key columns */\n int16 pk_attnums[RI_MAX_NUMKEYS]; /* attnums of referenced cols */\n int16 fk_attnums[RI_MAX_NUMKEYS]; /* attnums of referencing cols */\n Oid pf_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = FK) */\n Oid pp_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = PK) */\n Oid ff_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (FK = FK) */\n+ Oid period_contained_by_oper; /* operator for PERIOD SQL */\n+ Oid agged_period_contained_by_oper; /* operator for PERIOD SQL */\n+ Oid period_referenced_agg_proc; /* proc for PERIOD SQL */\n+ Oid period_referenced_agg_rettype; /* rettype for previous */\n\nthe comment seems not clear to me. Here is my understanding about it:\nperiod_contained_by_oper is the operator where a single period/range\ncontained by a single period/range.\nagged_period_contained_by_oper is the operator oid where a period\ncontained by a bound of periods\nperiod_referenced_agg_proc is the oprcode of the agged_period_contained_by_oper.\nperiod_referenced_agg_rettype is the function\nperiod_referenced_agg_proc returning data type.\n\n\n", "msg_date": "Thu, 14 Mar 2024 08:00:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hello,\n\nHere is a new patch series addressing the last few feedback emails\nfrom Peter & Jian He. 
It mostly focuses on the FKs patch, trying to\nget it really ready to commit, but it also finishes restoring all the\nfunctionality to the PERIODs patch (that I removed temporarily when we\nchanged PERIODs to GENERATED columns). I still want to restore a few\nmore tests there, but all the functionality is back (e.g. PERIODs with\nforeign keys and FOR PORTION OF), so it proves the GENERATED idea\nworks in principle. Specific feedback below:\n\nOn Mon, Mar 11, 2024 at 12:46 AM Peter Eisentraut <peter@eisentraut.org> wrote:\n> I had committed v27-0001-Rename-conwithoutoverlaps-to-conperiod.patch a\n> little while ago.\n\nThanks! It looks like you also fixed the pg_catalog docs which I missed.\n\n> Attached is a small patch that changes the PERIOD keyword to unreserved\n> for this patch. You had said earlier that this didn't work for you.\n> The attached patch works for me when applied on top of 0003.\n\nApplied and included here.\n\n> You wrote:\n>\n> > I'm not sure how else to do it. The issue is that `range_agg` returns\n> > a multirange, so the result\n> > type doesn't match the inputs. But other types will likely have the\n> > same problem: to combine boxes\n> > you may need a multibox. The combine mdranges you may need a\n> > multimdrange.\n>\n> Can we just hardcode the use of range_agg for this release? Might be\n> easier. I don't see all this generality being useful in the near future.\n\nOkay, I've hard-coded range_agg in the main patch and separated the\nsupport for multirange/etc in the next two patches. But there isn't\nmuch code there (mostly tests and docs). Since we can't hard-code the\n*operators*, most of the infrastructure is already there not to\nhard-code the aggregate function. Supporting multiranges is already a\nnice improvement. E.g. it should cut down on disk usage when a record\ngets updated frequently. 
Supporting arbitrary types also seems very\npowerful, and we already do that for PKs.\n\n> > Btw that part changed a bit since v24 because as jian he pointed out,\n> > our type system doesn't\n> > support anyrange inputs and an anyrange[] output. So I changed the\n> > support funcs to use SETOF.\n>\n> I didn't see any SETOF stuff in the patch, or I didn't know where to look.\n>\n> I'm not sure I follow all the details here. So more explanations of any\n> kind could be helpful.\n\nThis is talking about the FOR PORTION OF patch, not the FKs patch. It\nis the function that gives the \"leftovers\" after a temporal\nUPDATE/DELETE. There is explanation in the preliminary patch (adding\nthe support function) and the actual FOR PORTION OF patch, but if you\nthink they need more let me know.\n\nBut I'd love to talk more about this here: The reason for using a\nSETOF function is because you can't return an anyarray from a function\nthat takes anyrange or anymultirange. Or rather if you do, the array\nelements match the rangetype's bounds' type, not the rangetype itself:\n`T[] f(rangetype<T>)`, not `rangetype<T>[] f(rangetype<T>)`, and we\nneed the latter. So to get a list of rangetype objects we do a SETOF\nfunction that is `anyrange f(anyrange)`. Personally I think an\nimprovement would be to add a broken-out patch to add pseudotypes\ncalled anyrangearray and anymultirangearray, but using SETOF works\nnow, and I don't know if anyone is interested in such a patch. But\nit's not the first time I've hit this shortcoming in the pg type\nsystem, so I think it's worthwhile. And since FOR PORTION OF isn't\ngetting into v17, there is time to do it. What do you think? If it's\nan acceptable idea I will get started. It should be a separate\ncommitfest entry I think.\n\n> * v27-0003-Refactor-FK-operator-lookup.patch\n>\n> I suggest to skip this refactoring patch. 
I don't think the way this is\n> sliced up is all that great, and it doesn't actually help with the\n> subsequent patches.\n\nOkay.\n\n> - src/backend/catalog/pg_constraint.c\n>\n> FindFKPeriodOpersAndProcs() could use a bit more top-level\n> documentation. Where does the input opclass come from? What are the\n> three output values? What is the business with \"symmetric types\"?\n\nAdded and tried to clarify about the types.\n\n> - src/backend/commands/indexcmds.c\n>\n> GetOperatorFromWellKnownStrategy() is apparently changed to accept\n> InvalidOid for rhstype, but the meaning of this is not explained in\n> the function header. It's also not clear to me why an existing caller\n> is changed. This should be explained more thoroughly.\n\nIt's not so much changing a param as removing one and adding another.\nThe old param was unneeded because it's just the opclass's opcintype,\nand we're already passing the opclass. Then the new param lets you\noptionally ask for an operator that is not `opcintype op opcintype`\nbut `opcintype op rhstype`. We need this because FKs compare fkattr <@\nrange_agg(pkattr)`, and range_agg returns a multirange, not a range.\nEven if we hard-code range_agg, the easiest way to get the operator is\nto use this function, passing ANYMULTIRANGEOID (but better is to pass\nwhatever the referencedagg support func returns, as the now-separate\nmultirange/custom type patch does).\n\n> - src/backend/commands/tablecmds.c\n>\n> is_temporal and similar should be renamed to with_period or similar\n> throughout this patch.\n\nDone.\n\n> In transformFkeyGetPrimaryKey():\n>\n> * Now build the list of PK attributes from the indkey definition (we\n> - * assume a primary key cannot have expressional elements)\n> + * assume a primary key cannot have expressional elements, unless it\n> + * has a PERIOD)\n>\n> I think the original statement is still true even with PERIOD. The\n> expressional elements refer to expression indexes. 
I don't think we can\n> have a PERIOD marker on an expression?\n\nYou're right: I wrote this back before PERIODs became GENERATED\ncolumns. Updated now.\n\n> - src/backend/utils/adt/ri_triggers.c\n>\n> Please remove the separate trigger functions for the period case. They\n> are the same as the non-period ones, so we don't need separate ones.\n> The difference is handled lower in the call stack, which I think is a\n> good setup. Removing the separate functions also removes a lot of extra\n> code in other parts of the patch.\n\nDone. The later patch for FKs with CASCADE/SET NULL/SET DEFAULT still\nhas separate functions (since they call actually-different\nimplementations), but I will see if I can unify things a bit more\nthere.\n\n> - src/include/catalog/pg_constraint.h\n>\n> Should also update catalogs.sgml accordingly.\n\nLooks like you did this already in 030e10ff1a.\n\n> - src/test/regress/expected/without_overlaps.out\n> - src/test/regress/sql/without_overlaps.sql\n>\n> A few general comments on the tests:\n>\n> - In the INSERT commands, specify the column names explicitly. This\n> makes the tests easier to read (especially since the column order\n> between the PK and the FK table is sometimes different).\n\nOkay.\n\n> - Let's try to make it so that the inserted literals match the values\n> shown in the various error messages, so it's easier to match them up.\n> So, change the int4range literals to half-open notation. And also maybe\n> change the date output format to ISO.\n\nDone. Also changed the tsrange cols to daterange and made them\nYYYY-MM-DD. This is much easier to read IMO.\n\nNote there were already a few tsrange columns in the PK tests, so I\nchanged those separately in the very first patch here.\n\n> - In various comments, instead of test FK \"child\", maybe use\n> \"referencing table\"? Instead of \"parent\", use \"referenced table\" (or\n> primary key table). 
When I read child and parent I was looking for\n> inheritance.\n\nDone.\n\n> - Consider truncating the test tables before each major block of tests\n> and refilling them with fresh data. So it's easier to eyeball the\n> tests. Otherwise, there is too much dependency on what earlier tests\n> left behind.\n\nDone. This will also let me reuse ids in the FOR PORTION OF\npartitioned table tests, but that's not done yet.\n\n> A specific question:\n>\n> In this test, a PERIOD marker on the referenced site is automatically\n> inferred from the primary key:\n>\n> +-- with inferred PK on the referenced table:\n> +CREATE TABLE temporal_fk_rng2rng (\n> + id int4range,\n> + valid_at tsrange,\n> + parent_id int4range,\n> + CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT\n> OVERLAPS),\n> + CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD\n> valid_at)\n> + REFERENCES temporal_rng\n> +);\n>\n> In your patch, this succeeds. According to the SQL standard, it should\n> not. In subclause 11.8, syntax rule 4b:\n>\n> \"\"\"\n> Otherwise, the table descriptor of the referenced table shall include a\n> unique constraint UC that specifies PRIMARY KEY. The table constraint\n> descriptor of UC shall not include an application time period name.\n> \"\"\"\n>\n> So this case is apparently explicitly ruled out.\n>\n> (It might be ok to make an extension here, but then we should be\n> explicit about it.)\n\nOkay, I agree it doesn't match the standard. IMO our behavior is\nbetter, but the patches here should let you go either way. The main FK\npatch keeps the old behavior, but there is a follow-up patch doing\nwhat the standard says. There are some interesting implications, which\nyou can see by looking at the test changes in that patch. 
Basically\nyou can never give an inferred REFERENCES against a temporal table.\nEither your FK has a PERIOD element, and it fails because we exclude\nthe PK's WITHOUT OVERLAPS in the inferred attributes, or your FK does\nnot have a PERIOD element, and it fails because you want a PK side\nthat is genuinely unique, but the PK index has a temporal definition\nof \"unique\" (and is not B-tree but GiST).\n\nI don't see any drawbacks from supporting inferred REFERENCES with\ntemporal tables, so my vote is to break from the standard here, and\n*not* apply that follow-up patch. Should I add some docs about that?\nAlso skipping the patch will cause some annoying merge conflicts, so\nlet me know if that's what you choose and I'll handle them right away.\n\nBtw I tried checking what other vendors do here, but no one supports\ntemporal FKs yet! MS SQL Server doesn't support application time at\nall. Oracle and MariaDB don't support temporal PKs or FKs. And IBM DB2\nonly supports temporal PKs. Actually DB2's docs in 2019 were\n*claiming* they supported temporal FKs, but it didn't work for me or\nat least one other person posting in their forums. And the latest docs\nno longer mention it.[1] I wrote about trying to make it work in my\nsurvey of other vendors.[2] The old docs are now a 404,[3] as is the\nforums post.[4] My DB2 test code is below in case anyone else wants to\ntry.[5] So there is no precedent here for us to follow.\n\nIncidentally, here are two non-standard things I would like to add \"some day\":\n\n1. FKs from non-temporal tables to temporal tables. Right now temporal\ntables are \"contagious\", which can be annoying. Maybe a non-temporal\nrecord is valid as long as a referenced temporal row exists at *any\ntime*. You can't do that today. You can't even add an additional\nUNIQUE constraint, because there are surely duplicates that invalidate\nit. This kind of FK would be satisfied if *at least one* reference\nexists.\n\n2. 
FKs from a single-timestamp table to a temporal table. Maybe the\nreferring table is an \"event\" with no duration, but it is valid as\nlong as the referenced table contains it. A workaround is to have a\nrange that is `[t,t]`, but that's annoying.\n\nAnyway that's not important for these patches. As far as I can tell,\nwhatever we choose re inferred PERIOD in REFERENCES keeps our options\nopen for those ideas.\n\nOne more thought: if we wanted to be cheekily compatible with the\nstandard, we could infer *range types* that are WITHOUT OVERLAPs but\nnot true PERIOD objects. \"The table constraint descriptor of UC shall\nnot include an application time period name.\" If it's a rangetype\ncolumn, then it doesn't include a period name. :-P. So then we would\nskip the follow-up patch here but I could work it into the final patch\nfor PERIOD support. This is probably not the wisest choice, although I\nguess it does let us defer deciding what to do.\n\nOn Mon, Mar 11, 2024 at 7:45 PM jian he <jian.universality@gmail.com> wrote:\n> typo \"referenced_agg\", in the gist-extensibility.html page is \"referencedagg\"\n> <literal>WITHOUT PORTION</literal> should be <literal>WITHOUT OVERLAPS</literal>\n\nGood catch! 
Fixed.\n\n> + While the non-<literal>PERIOD</literal> columns are treated normally\n> + (and there must be at least one of them),\n> + the <literal>PERIOD</literal> column is not compared for equality.\n> the above sentence didn't say what is \"normally\"?\n> maybe we can do the following:\n> + While the non-<literal>PERIOD</literal> columns are treated\n> + normally for equality\n> + (and there must be at least one of them),\n> + the <literal>PERIOD</literal> column is not compared for equality.\n\nReworked the language here.\n\n> my_range_agg_transfn error message is inconsistent?\n> `elog(ERROR, \"range_agg_transfn called in non-aggregate context\");`\n> `elog(ERROR, \"range_agg must be called with a range\");`\n> maybe just `my_range_agg_transfn`, instead of mention\n> {range_agg_transfn|range_agg}\n> similarly my_range_agg_finalfn error is also inconsistent.\n\nThis matches what other aggs do (e.g. array_agg, json_agg, etc.) as\nwell as the actual core range_agg code. And I think it is an\nappropriate difference. You only hit the first error if you are\ninvoking the transfn directly, so that's what we should say. OTOH you\nhit the second error by calling the aggregate function, but with the\nwrong type. So the error message should mention the aggregate\nfunction.\n\n> my_range_agg_finalfn need `type_is_multirange(mltrngtypoid)`?\n\nThis isn't part of the core range_agg_finalfn, so I'd rather not\ninclude it here. And I don't think it is needed. 
You would only get a\nnon-multirange if the transfn does something wrong, and even if it\ndoes, the error will be caught and reported in\nmultirange_get_typcache.\n\nOn Mon, Mar 11, 2024 at 7:47 PM jian he <jian.universality@gmail.com> wrote:\n> maybe just change the tsrange type to daterange, then the dot out file\n> will be far less verbose.\n\nAgreed, done.\n\n> minor issues while reviewing v27, 0001 to 0004.\n> transformFkeyGetPrimaryKey comments need to update,\n> since bool pk_period also returned.\n\npk_period is no longer returned in this latest patch.\n\n> +/*\n> + * FindFKComparisonOperators -\n> + *\n> + * Gets the operators for pfeqopOut, ppeqopOut, and ffeqopOut.\n> + * Sets old_check_ok if we can avoid re-validating the constraint.\n> + * Sets old_pfeqop_item to the old pfeqop values.\n> + */\n> +static void\n> +FindFKComparisonOperators(Constraint *fkconstraint,\n> + AlteredTableInfo *tab,\n> + int i,\n> + int16 *fkattnum,\n> + bool *old_check_ok,\n> + ListCell **old_pfeqop_item,\n> + Oid pktype, Oid fktype, Oid opclass,\n> + Oid *pfeqopOut, Oid *ppeqopOut, Oid *ffeqopOut)\n>\n> I think the above comments is\n> `Sets the operators for pfeqopOut, ppeqopOut, and ffeqopOut.`.\n\nThis whole function is removed.\n\n> + if (is_temporal)\n> + {\n> + if (!fkconstraint->fk_with_period)\n> + ereport(ERROR,\n> + (errcode(ERRCODE_INVALID_FOREIGN_KEY),\n> + errmsg(\"foreign key uses PERIOD on the referenced table but not the\n> referencing table\")));\n> + }\n> can be\n> if (is_temporal && !fkconstraint->fk_with_period)\n> ereport(ERROR,\n> (errcode(ERRCODE_INVALID_FOREIGN_KEY),\n> errmsg(\"foreign key uses PERIOD on the referenced table but not the\n> referencing table\")));\n\nThe patch about inferred REFERENCES moves things around a bit, so this\nno longer applies.\n\n> + if (is_temporal)\n> + {\n> + if (!fkconstraint->pk_with_period)\n> + /* Since we got pk_attrs, one should be a period. 
*/\n> + ereport(ERROR,\n> + (errcode(ERRCODE_INVALID_FOREIGN_KEY),\n> + errmsg(\"foreign key uses PERIOD on the referencing table but not the\n> referenced table\")));\n> + }\n> can be\n> if (is_temporal && !fkconstraint->pk_with_period)\n> /* Since we got pk_attrs, one should be a period. */\n> ereport(ERROR,\n> (errcode(ERRCODE_INVALID_FOREIGN_KEY),\n> errmsg(\"foreign key uses PERIOD on the referencing table but not the\n> referenced table\")));\n\nLikewise.\n\n> refactor decompile_column_index_array seems unnecessary.\n> Peter already mentioned it at [1], I have tried to fix it at [2].\n\nNo, that conversation is about handling WITHOUT OVERLAPS, not PERIOD.\nBecause the syntax is `valid_at WITHOUT OVERLAPS` but `PERIOD\nvalid_at` (post vs pre), we must handle PERIOD inside the function.\n\n> I don't understand the \"expression elements\" in the comments, most of\n> the tests case is like\n\nCovered above in Peter's feedback.\n\n> `\n> PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)\n> `\n> + *pk_period = (indexStruct->indisexclusion);\n> can be\n> `+ *pk_period = indexStruct->indisexclusion;`\n\nNo longer included here.\n\nOn Wed, Mar 13, 2024 at 5:00 PM jian he <jian.universality@gmail.com> wrote:\n> @@ -118,12 +120,17 @@ typedef struct RI_ConstraintInfo\n> int16 confdelsetcols[RI_MAX_NUMKEYS]; /* attnums of cols to set on\n> * delete */\n> char confmatchtype; /* foreign key's match type */\n> + bool temporal; /* if the foreign key is temporal */\n> int nkeys; /* number of key columns */\n> int16 pk_attnums[RI_MAX_NUMKEYS]; /* attnums of referenced cols */\n> int16 fk_attnums[RI_MAX_NUMKEYS]; /* attnums of referencing cols */\n> Oid pf_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = FK) */\n> Oid pp_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = PK) */\n> Oid ff_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (FK = FK) */\n> + Oid period_contained_by_oper; /* operator for PERIOD SQL */\n> + Oid agged_period_contained_by_oper; /* operator for PERIOD SQL 
*/\n> + Oid period_referenced_agg_proc; /* proc for PERIOD SQL */\n> + Oid period_referenced_agg_rettype; /* rettype for previous */\n>\n> the comment seems not clear to me. Here is my understanding about it:\n> period_contained_by_oper is the operator where a single period/range\n> contained by a single period/range.\n> agged_period_contained_by_oper is the operator oid where a period\n> contained by a bound of periods\n> period_referenced_agg_proc is the oprcode of the agged_period_contained_by_oper.\n> period_referenced_agg_rettype is the function\n> period_referenced_agg_proc returning data type.\n\nExpanded these comments a bit.\n\nThanks to you both for such detailed, careful feedback!\n\nRebased to 605062227f.\n\nIf anything else comes up re FKs I'll tackle that first, but otherwise\nI think I will work on some of the outstanding issues in the FOR\nPORTION OF patch (e.g. trigger transition table names). I may\nexperiment with handling the leftover inserts as a separate executor\nnode. If anyone has advice there I'm happy to hear it!\n\nYours,\nPaul\n\n\n[1] https://www.ibm.com/docs/en/db2/11.5?topic=statements-alter-table\n[2] https://illuminatedcomputing.com/posts/2019/08/sql2011-survey/\n[3] https://www.ibm.com/support/knowledgecenter/en/SSEPEK_10.0.0/intro/src/tpc/db2z_integrity.html\n[4] https://www.ibm.com/developerworks/community/forums/html/topic?id=440e07ad-23ee-4b0a-ae23-8c747abca819\n[5] Here is DB2 test code showing temporal FKs don't work. 
(Note they\ndisobey the standard re declaring `PERIOD p (s, e)` not `PERIOD FOR p\n(s, e)`, and it must be named `business_time`.)\n\n```\ncreate table t (id integer not null, ds date not null, de date not\nnull, name varchar(4000), period business_time (ds, de));\nalter table t add constraint tpk primary key (id, business_time\nwithout overlaps)\ninsert into t values (1, '2000-01-01', '2001-01-01', 'foo');\ncreate table fk (id integer, ds date not null, de date not null,\nperiod business_time (ds, de));\n\n-- all this fails:\nalter table fk add constraint fkfk foreign key (id, period\nbusiness_time) references t (id, period business_time);\nalter table fk add constraint fkfk foreign key (id, business_time)\nreferences t (id, business_time);\nalter table fk add constraint fkfk foreign key (id, period\nbusiness_time) references t;\nalter table fk add constraint fkfk foreign key (id, business_time) references t;\nalter table fk add constraint fkfk foreign key (id, period for\nbusiness_time) references t;\nalter table fk add constraint fkfk foreign key (id, period for\nbusiness_time) references t (id, period for business_time);\nalter table fk add constraint fkfk foreign key (id, business_time\nwithout overlaps) references t;\nalter table fk add constraint fkfk foreign key (id, business_time\nwithout overlaps) references t (id, business_time without overlaps);\nalter table fk add constraint fkfk foreign key (id) references t;\nalter table fk add constraint fkfk foreign key (id) references t (id);\n```", "msg_date": "Sat, 16 Mar 2024 14:37:10 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hi, minor issues from 00001 to 0005.\n+ <row>\n+ <entry><function>referencedagg</function></entry>\n+ <entry>aggregates referenced rows' <literal>WITHOUT OVERLAPS</literal>\n+ part</entry>\n+ <entry>13</entry>\n+ </row>\ncomparing with surrounding items, maybe need to add 
`(optional)`?\nI think the explanation is not good as explained in referencedagg entry below:\n <para>\n An aggregate function. Given values of this opclass,\n it returns a value combining them all. The return value\n need not be the same type as the input, but it must be a\n type that can appear on the right hand side of the \"contained by\"\n operator. For example the built-in <literal>range_ops</literal>\n opclass uses <literal>range_agg</literal> here, so that foreign\n keys can check <literal>fkperiod @> range_agg(pkperiod)</literal>.\n </para>\n\n\n+ In other words, the reference must have a referent for its\nentire duration.\n+ This column must be a column with a range type.\n+ In addition the referenced table must have a primary key\n+ or unique constraint declared with <literal>WITHOUT PORTION</literal>.\n+ </para>\nseems you missed replacing this one.\n\n\nin v28-0002, the function name is FindFKPeriodOpers,\nthen in v28-0005 rename it to FindFKPeriodOpersAndProcs?\nrenaming the function name in a set of patches seems not a good idea?\n\n\n+ <para>\n+ This is used for temporal foreign key constraints.\n+ If you omit this support function, your type cannot be used\n+ as the <literal>PERIOD</literal> part of a foreign key.\n+ </para>\nin v28-0004, I think here \"your type\" should change to \"your opclass\"?\n\n+bool\n+check_amproc_is_aggregate(Oid funcid)\n+{\n+ bool result;\n+ HeapTuple tp;\n+ Form_pg_proc procform;\n+\n+ tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));\n+ if (!HeapTupleIsValid(tp))\n+ elog(ERROR, \"cache lookup failed for function %u\", funcid);\n+ procform = (Form_pg_proc) GETSTRUCT(tp);\n+ result = procform->prokind == 'a';\n+ ReleaseSysCache(tp);\n+ return result;\n+}\nmaybe\n`\nchange procform->prokind == 'a';\n`\nto\n`\nprocform->prokind == PROKIND_AGGREGATE;\n`\nor we can put the whole function to cache/lsyscache.c\nname it just as proc_is_aggregate.\n\n\n- Added pg_dump support.\n- Show the correct syntax in psql \\d 
output for foreign keys.\nin 28-0002, seems there is no work to correspond to these 2 items in\nthe commit message?\n\n\n@@ -12335,7 +12448,8 @@ validateForeignKeyConstraint(char *conname,\n Relation rel,\n Relation pkrel,\n Oid pkindOid,\n- Oid constraintOid)\n+ Oid constraintOid,\n+ bool temporal)\ndo you need to change the last argument of this function to \"is_period\"?\n\n\n+ sprintf(paramname, \"$%d\", riinfo->nkeys);\n+ sprintf(paramname, \"$%d\", riinfo->nkeys);\ndo you think it worth the trouble to change to snprintf, I found\nrelated post on [1].\n\n[1] https://stackoverflow.com/a/7316500/15603477\n\n\n", "msg_date": "Mon, 18 Mar 2024 07:30:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "one more minor issue related to error reporting.\nI've only applied v28, 0001 to 0005.\n\n-- (parent_id, valid_at) REFERENCES [implicit]\n-- FOREIGN KEY part should specify PERIOD\nCREATE TABLE temporal_fk_rng2rng (\nid int4range,\nvalid_at daterange,\nparent_id int4range,\nCONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),\nCONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, valid_at)\nREFERENCES temporal_rng\n);\nERROR: number of referencing and referenced columns for foreign key disagree\n\n-- (parent_id, PERIOD valid_at) REFERENCES (id)\nCREATE TABLE temporal_fk_rng2rng (\nid int4range,\nvalid_at daterange,\nparent_id int4range,\nCONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),\nCONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng (id)\n);\nERROR: foreign key uses PERIOD on the referencing table but not the\nreferenced table\n\nthese error messages seem somehow inconsistent with the comments above?\n\n\n+ else\n+ {\n+ /*\n+ * Check it's a btree; currently this can never fail since no other\n+ * index AMs support unique indexes. 
If we ever did have other types\n+ * of unique indexes, we'd need a way to determine which operator\n+ * strategy number is equality. (Is it reasonable to insist that\n+ * every such index AM use btree's number for equality?)\n+ */\n+ if (amid != BTREE_AM_OID)\n+ elog(ERROR, \"only b-tree indexes are supported for foreign keys\");\n+ eqstrategy = BTEqualStrategyNumber;\n+ }\n\nthe comments say never fail.\nbut it actually failed. see:\n\n+-- (parent_id) REFERENCES [implicit]\n+-- This finds the PK (omitting the WITHOUT OVERLAPS element),\n+-- but it's not a b-tree index, so it fails anyway.\n+-- Anyway it must fail because the two sides have a different\ndefinition of \"unique\".\n+CREATE TABLE temporal_fk_rng2rng (\n+ id int4range,\n+ valid_at daterange,\n+ parent_id int4range,\n+ CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),\n+ CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id)\n+ REFERENCES temporal_rng\n+);\n+ERROR: only b-tree indexes are supported for foreign keys\n\nbecause in transformFkeyGetPrimaryKey.\nwe have `if (indexStruct->indisexclusion && i == indexStruct->indnatts - 1)`\nwe have pk_with_period, fk_with_period in Constraint struct.\n\nmaybe we can add a bool argument to transformFkeyGetPrimaryKey\nindicate, this primary key is a conperiod constraint.\nthen we can check condition: the primary key is a conperiod constraint\nand fk_with_period or is pk_with_period is false\n\nI've made a patch to make these error reporting more accurate.\nyou can further refine it.", "msg_date": "Mon, 18 Mar 2024 12:47:18 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hi All,\n\nA few more changes here:\n\nOn 3/17/24 16:30, jian he wrote:\n > Hi, minor issues from 00001 to 0005.\n > + <row>\n > + <entry><function>referencedagg</function></entry>\n > + <entry>aggregates referenced rows' <literal>WITHOUT OVERLAPS</literal>\n > 
+ part</entry>\n > + <entry>13</entry>\n > + </row>\n > comparing with surrounding items, maybe need to add `(optional)`?\n\nWe do say this function is optional above, in the list of support functions. That seems to be the \nnormal approach. The only other support function that mentions being optional elsewhere is sortsupport.\n\n > I think the explanation is not good as explained in referencedagg entry below:\n > <para>\n > An aggregate function. Given values of this opclass,\n > it returns a value combining them all. The return value\n > need not be the same type as the input, but it must be a\n > type that can appear on the right hand side of the \"contained by\"\n > operator. For example the built-in <literal>range_ops</literal>\n > opclass uses <literal>range_agg</literal> here, so that foreign\n > keys can check <literal>fkperiod @> range_agg(pkperiod)</literal>.\n > </para>\n\nCan you explain what you'd like to see improved here?\n\n > + In other words, the reference must have a referent for its\n > entire duration.\n > + This column must be a column with a range type.\n > + In addition the referenced table must have a primary key\n > + or unique constraint declared with <literal>WITHOUT PORTION</literal>.\n > + </para>\n > seems you missed replacing this one.\n\nI'm not sure what this is referring to. Replaced what?\n\n > in v28-0002, the function name is FindFKPeriodOpers,\n > then in v28-0005 rename it to FindFKPeriodOpersAndProcs?\n > renaming the function name in a set of patches seems not a good idea?\n\nWe'll only apply part 5 if we support more than range types (though I think that would be great). It \ndoesn't make sense to name this function FindFKPeriodOpersAndProcs when it isn't yet finding a proc. \nIf it's a problem to rename it in part 5 perhaps the commits should be squashed by the committer? 
\nBut I don't see the problem really.\n\n > + <para>\n > + This is used for temporal foreign key constraints.\n > + If you omit this support function, your type cannot be used\n > + as the <literal>PERIOD</literal> part of a foreign key.\n > + </para>\n > in v28-0004, I think here \"your type\" should change to \"your opclass\"?\n\nI think \"your type\" addresses what the user is more likely to care about, but I added some \nclarification here.\n\n > +bool\n > +check_amproc_is_aggregate(Oid funcid)\n > +{\n > + bool result;\n > + HeapTuple tp;\n > + Form_pg_proc procform;\n > +\n > + tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));\n > + if (!HeapTupleIsValid(tp))\n > + elog(ERROR, \"cache lookup failed for function %u\", funcid);\n > + procform = (Form_pg_proc) GETSTRUCT(tp);\n > + result = procform->prokind == 'a';\n > + ReleaseSysCache(tp);\n > + return result;\n > +}\n > maybe\n > `\n > change procform->prokind == 'a';\n > `\n > to\n > `\n > procform->prokind == PROKIND_AGGREGATE;\n > `\n > or we can put the whole function to cache/lsyscache.c\n > name it just as proc_is_aggregate.\n\nAdded the constant reference. 
Since lsyscache.c already has get_func_prokind, I changed the gist \nvalidation function to call that directly.\n\n > - Added pg_dump support.\n > - Show the correct syntax in psql \\d output for foreign keys.\n > in 28-0002, seems there is no work to correspond to these 2 items in\n > the commit message?\n\nThe changes to psql and pg_dump happen in pg_get_constraintdef_worker and \ndecompile_column_index_array (both in ruleutils.c).\n\n > @@ -12335,7 +12448,8 @@ validateForeignKeyConstraint(char *conname,\n > Relation rel,\n > Relation pkrel,\n > Oid pkindOid,\n > - Oid constraintOid)\n > + Oid constraintOid,\n > + bool temporal)\n > do you need to change the last argument of this function to \"is_period\"?\n\nChanged to hasperiod.\n\n > + sprintf(paramname, \"$%d\", riinfo->nkeys);\n > + sprintf(paramname, \"$%d\", riinfo->nkeys);\n > do you think it worth the trouble to change to snprintf, I found\n > related post on [1].\n >\n > [1] https://stackoverflow.com/a/7316500/15603477\n\nparamname holds 16 chars so I don't think there is any risk of an int overflowing here. The existing \nforeign key code already uses sprintf, so I don't think it makes sense to be inconsistent here. 
And \nif we want to change it it should probably be in a separate commit, not buried in a commit about \nadding temporal foreign keys.\n\nOn 3/17/24 21:47, jian he wrote:\n > one more minor issue related to error reporting.\n > I've only applied v28, 0001 to 0005.\n >\n > -- (parent_id, valid_at) REFERENCES [implicit]\n > -- FOREIGN KEY part should specify PERIOD\n > CREATE TABLE temporal_fk_rng2rng (\n > id int4range,\n > valid_at daterange,\n > parent_id int4range,\n > CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),\n > CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, valid_at)\n > REFERENCES temporal_rng\n > );\n > ERROR: number of referencing and referenced columns for foreign key disagree\n >\n > -- (parent_id, PERIOD valid_at) REFERENCES (id)\n > CREATE TABLE temporal_fk_rng2rng (\n > id int4range,\n > valid_at daterange,\n > parent_id int4range,\n > CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),\n > CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, PERIOD valid_at)\n > REFERENCES temporal_rng (id)\n > );\n > ERROR: foreign key uses PERIOD on the referencing table but not the\n > referenced table\n >\n > these error messages seem somehow inconsistent with the comments above?\n\nClarified the comments.\n\n > + else\n > + {\n > + /*\n > + * Check it's a btree; currently this can never fail since no other\n > + * index AMs support unique indexes. If we ever did have other types\n > + * of unique indexes, we'd need a way to determine which operator\n > + * strategy number is equality. (Is it reasonable to insist that\n > + * every such index AM use btree's number for equality?)\n > + */\n > + if (amid != BTREE_AM_OID)\n > + elog(ERROR, \"only b-tree indexes are supported for foreign keys\");\n > + eqstrategy = BTEqualStrategyNumber;\n > + }\n >\n > the comments say never fail.\n > but it actually failed. 
see:\n >\n > +-- (parent_id) REFERENCES [implicit]\n > +-- This finds the PK (omitting the WITHOUT OVERLAPS element),\n > +-- but it's not a b-tree index, so it fails anyway.\n > +-- Anyway it must fail because the two sides have a different\n > definition of \"unique\".\n > +CREATE TABLE temporal_fk_rng2rng (\n > + id int4range,\n > + valid_at daterange,\n > + parent_id int4range,\n > + CONSTRAINT temporal_fk_rng2rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS),\n > + CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id)\n > + REFERENCES temporal_rng\n > +);\n > +ERROR: only b-tree indexes are supported for foreign keys\n\nYou're right, now that we have temporal primary keys the comment is out-of-date.\nYou can reach that error message by creating a regular foreign key against a temporal primary key.\n\nPerhaps we should update the comment separately, although I haven't added a new patch for that here.\nI did update the comment as part of this FK patch. I also added \"non-PERIOD\" to the error message\n(which only makes sense in the FK patch). Since the error message was impossible before, I assume \nthat is no problem. I think this is a simpler fix than what you have in your attached patch. In \naddition your patch doesn't work if we include part 3 here: see Peter's feedback about the SQL \nstandard and my reply.\n\nRebased to 846311051e.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Mon, 18 Mar 2024 15:49:28 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, Mar 19, 2024 at 6:49 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> Rebased to 846311051e.\n>\n\nHi, I just found out some minor issues.\n\n+ * types matching the PERIOD element. 
periodprocoid is a GiST support\nfunction to\n+ * aggregate multiple PERIOD element values into a single value\n+ * (whose return type need not match its inputs,\n+ * e.g. many ranges can be aggregated into a multirange).\n * And aggedperiodoperoid is also a ContainedBy operator,\n- * but one whose rhs is anymultirange.\n+ * but one whose rhs matches the type returned by aggedperiodoperoid.\n * That way foreign keys can compare fkattr <@ range_agg(pkattr).\n */\n void\n-FindFKPeriodOpers(Oid opclass,\n- Oid *periodoperoid,\n- Oid *aggedperiodoperoid)\n+FindFKPeriodOpersAndProcs(Oid opclass,\n+ Oid *periodoperoid,\n+ Oid *aggedperiodoperoid,\n+ Oid *periodprocoid)\n\nI think, aggedperiodoperoid is more descriptive than periodprocoid, in\n0005, you don't need to rename it.\naslo do we need to squash v29 0001 to 0005 together?\n\n--- a/doc/src/sgml/ref/create_table.sgml\n+++ b/doc/src/sgml/ref/create_table.sgml\n@@ -1167,7 +1167,8 @@ WITH ( MODULUS <replaceable\nclass=\"parameter\">numeric_literal</replaceable>, REM\n column(s) of some row of the referenced table. If the <replaceable\n class=\"parameter\">refcolumn</replaceable> list is omitted, the\n primary key of the <replaceable class=\"parameter\">reftable</replaceable>\n- is used. 
Otherwise, the <replaceable\nclass=\"parameter\">refcolumn</replaceable>\n+ is used (omitting any part declared with <literal>WITHOUT\nOVERLAPS</literal>).\n+ Otherwise, the <replaceable class=\"parameter\">refcolumn</replaceable>\n list must refer to the columns of a non-deferrable unique or primary key\n constraint or be the columns of a non-partial unique index.\n </para>\nI think this does not express that\nforeign key is PERIOD, then the last column of refcolumn must specify PERIOD?\n\n+ <para>\n+ If the last column is marked with <literal>PERIOD</literal>,\n+ it is treated in a special way.\n+ While the non-<literal>PERIOD</literal> columns are compared for equality\n+ (and there must be at least one of them),\n+ the <literal>PERIOD</literal> column is not.\n+ Instead the constraint is considered satisfied\n+ if the referenced table has matching records\n+ (based on the non-<literal>PERIOD</literal> parts of the key)\n+ whose combined <literal>PERIOD</literal> values completely cover\n+ the referencing record's.\n+ In other words, the reference must have a referent for its\nentire duration.\n+ This column must be a column with a range type.\n+ In addition the referenced table must have a primary key\n+ or unique constraint declared with <literal>WITHOUT PORTION</literal>.\n+ </para>\nyou forgot to change <literal>WITHOUT PORTION</literal> to\n<literal>WITHOUT OVERLAPS</literal>\n\n\nOid pf_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = FK) */\nOid pp_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = PK) */\nOid ff_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (FK = FK) */\nin struct RI_ConstraintInfo, these comments need to be updated?\n\n\n", "msg_date": "Tue, 19 Mar 2024 17:01:02 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 16.03.24 22:37, Paul A Jungwirth wrote:\n> Here is a new patch series addressing the last few feedback emails\n> from 
Peter & Jian He. It mostly focuses on the FKs patch, trying to\n> get it really ready to commit,\n\nI have committed the test changes (range and date format etc.).\n\nThe FOREIGN KEY patch looks okay to me now. Maybe check if any of the \nsubsequent comments from jian should be applied.\n\n>> > I'm not sure how else to do it. The issue is that `range_agg` returns\n>> > a multirange, so the result\n>> > type doesn't match the inputs. But other types will likely have the\n>> > same problem: to combine boxes\n>> > you may need a multibox. The combine mdranges you may need a\n>> > multimdrange.\n>>\n>> Can we just hardcode the use of range_agg for this release? Might be\n>> easier. I don't see all this generality being useful in the near future.\n> \n> Okay, I've hard-coded range_agg in the main patch and separated the\n> support for multirange/etc in the next two patches. But there isn't\n> much code there (mostly tests and docs). Since we can't hard-code the\n> *operators*, most of the infrastructure is already there not to\n> hard-code the aggregate function. Supporting multiranges is already a\n> nice improvement. E.g. it should cut down on disk usage when a record\n> gets updated frequently. Supporting arbitrary types also seems very\n> powerful, and we already do that for PKs.\n\nI think we could also handle multiranges in a hardcoded way? Ranges and \nmultiranges are hardcoded concepts anyway. It's just when we move to \narbitrary types supporting containment, then it gets a bit more complicated.\n\nWhat would a patch that adds just multiranges on the FK side, but \nwithout the full pluggable gist support, look like?\n\n> I don't see any drawbacks from supporting inferred REFERENCES with\n> temporal tables, so my vote is to break from the standard here, and\n> *not* apply that follow-up patch. 
Should I add some docs about that?\n> Also skipping the patch will cause some annoying merge conflicts, so\n> let me know if that's what you choose and I'll handle them right away.\n\nI agree we can allow this.\n\n\n\n", "msg_date": "Tue, 19 Mar 2024 12:02:45 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 3/19/24 04:02, Peter Eisentraut wrote:\n > On 16.03.24 22:37, Paul A Jungwirth wrote:\n >> Here is a new patch series addressing the last few feedback emails\n >> from Peter & Jian He. It mostly focuses on the FKs patch, trying to\n >> get it really ready to commit,\n >\n > I have committed the test changes (range and date format etc.).\n >\n > The FOREIGN KEY patch looks okay to me now. Maybe check if any of the subsequent comments from jian\n > should be applied.\n\nOkay, specifics below.\n\n > I think we could also handle multiranges in a hardcoded way? Ranges and multiranges are hardcoded\n > concepts anyway. It's just when we move to arbitrary types supporting containment, then it gets a\n > bit more complicated.\n >\n > What would a patch that adds just multiranges on the FK side, but without the full pluggable gist\n > support, look like?\n\nAttached a separate patch extending FKs to multiranges only. I'd still love to support arbitrary \ntypes eventually but it's not part of the patches here now.\n\n >> I don't see any drawbacks from supporting inferred REFERENCES with\n >> temporal tables, so my vote is to break from the standard here, and\n >> *not* apply that follow-up patch. Should I add some docs about that?\n >> Also skipping the patch will cause some annoying merge conflicts, so\n >> let me know if that's what you choose and I'll handle them right away.\n >\n > I agree we can allow this.\n\nGreat, thanks! Took out those changes.\n\nOn 3/19/24 02:01, jian he wrote:\n > + * types matching the PERIOD element. 
periodprocoid is a GiST support\n > function to\n > + * aggregate multiple PERIOD element values into a single value\n > + * (whose return type need not match its inputs,\n > + * e.g. many ranges can be aggregated into a multirange).\n > * And aggedperiodoperoid is also a ContainedBy operator,\n > - * but one whose rhs is anymultirange.\n > + * but one whose rhs matches the type returned by aggedperiodoperoid.\n > * That way foreign keys can compare fkattr <@ range_agg(pkattr).\n > */\n > void\n > -FindFKPeriodOpers(Oid opclass,\n > - Oid *periodoperoid,\n > - Oid *aggedperiodoperoid)\n > +FindFKPeriodOpersAndProcs(Oid opclass,\n > + Oid *periodoperoid,\n > + Oid *aggedperiodoperoid,\n > + Oid *periodprocoid)\n >\n > I think, aggedperiodoperoid is more descriptive than periodprocoid, in\n > 0005, you don't need to rename it.\n > aslo do we need to squash v29 0001 to 0005 together?\n\nI changed the operator names to {,agged}containedbyoperoid. The proc names are not included now \nbecause we only need them for supporting more than ranges + multiranges.\n\n > --- a/doc/src/sgml/ref/create_table.sgml\n > +++ b/doc/src/sgml/ref/create_table.sgml\n > @@ -1167,7 +1167,8 @@ WITH ( MODULUS <replaceable\n > class=\"parameter\">numeric_literal</replaceable>, REM\n > column(s) of some row of the referenced table. If the <replaceable\n > class=\"parameter\">refcolumn</replaceable> list is omitted, the\n > primary key of the <replaceable class=\"parameter\">reftable</replaceable>\n > - is used. 
Otherwise, the <replaceable\n > class=\"parameter\">refcolumn</replaceable>\n > + is used (omitting any part declared with <literal>WITHOUT\n > OVERLAPS</literal>).\n > + Otherwise, the <replaceable class=\"parameter\">refcolumn</replaceable>\n > list must refer to the columns of a non-deferrable unique or primary key\n > constraint or be the columns of a non-partial unique index.\n > </para>\n > I think this does not express that\n > foreign key is PERIOD, then the last column of refcolumn must specify PERIOD?\n\nOkay, added a sentence about that (and adjusted some other things re allowing implicit REFERENCES \nand only supporting ranges + multiranges).\n\n > + <para>\n > + If the last column is marked with <literal>PERIOD</literal>,\n > + it is treated in a special way.\n > + While the non-<literal>PERIOD</literal> columns are compared for equality\n > + (and there must be at least one of them),\n > + the <literal>PERIOD</literal> column is not.\n > + Instead the constraint is considered satisfied\n > + if the referenced table has matching records\n > + (based on the non-<literal>PERIOD</literal> parts of the key)\n > + whose combined <literal>PERIOD</literal> values completely cover\n > + the referencing record's.\n > + In other words, the reference must have a referent for its\n > entire duration.\n > + This column must be a column with a range type.\n > + In addition the referenced table must have a primary key\n > + or unique constraint declared with <literal>WITHOUT PORTION</literal>.\n > + </para>\n > you forgot to change <literal>WITHOUT PORTION</literal> to\n > <literal>WITHOUT OVERLAPS</literal>\n\nOh! 
Thanks, I guess I was just blind.\n\n > Oid pf_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = FK) */\n > Oid pp_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (PK = PK) */\n > Oid ff_eq_oprs[RI_MAX_NUMKEYS]; /* equality operators (FK = FK) */\n > in struct RI_ConstraintInfo, these comments need to be updated?\n\nIn earlier feedback Peter advised not changing the \"equals\" language (e.g. in KeysEqual). But I \nadded a comment at the top of the struct to clarify.\n\nRebased to 605721f819.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Tue, 19 Mar 2024 21:03:44 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "hi.\nminor cosmetic issues, other than that, looks good.\n\n*pk_period = (indexStruct->indisexclusion);\nto\n*pk_period = indexStruct->indisexclusion;\n\n\nif (with_period)\n{\nif (!fkconstraint->fk_with_period)\nereport(ERROR,\n(errcode(ERRCODE_INVALID_FOREIGN_KEY),\nerrmsg(\"foreign key uses PERIOD on the referenced table but not the\nreferencing table\")));\n}\n\nchange to\n\nif (with_period && !fkconstraint->fk_with_period)\nereport(ERROR,\n(errcode(ERRCODE_INVALID_FOREIGN_KEY),\nerrmsg(\"foreign key uses PERIOD on the referenced table but not the\nreferencing table\")));\n\n\n", "msg_date": "Wed, 20 Mar 2024 18:55:14 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 3/20/24 03:55, jian he wrote:\n> hi.\n> minor cosmetic issues, other than that, looks good.\n> \n> *pk_period = (indexStruct->indisexclusion);\n> to\n> *pk_period = indexStruct->indisexclusion;\n> \n> ... 
>\n> if (with_period && !fkconstraint->fk_with_period)\n> ereport(ERROR,\n> (errcode(ERRCODE_INVALID_FOREIGN_KEY),\n> errmsg(\"foreign key uses PERIOD on the referenced table but not the\n> referencing table\")));\n\nBoth included in the new patches here.\n\nRebased to a0390f6ca6.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Wed, 20 Mar 2024 09:21:44 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "with foreign key \"no action\",\nin a transaction, we can first insert foreign key data, then primary key data.\nalso the update/delete can fail at the end of transaction.\n\nbased on [1] explanation about the difference between \"no action\" and\n\"restrict\".\nI only refactor the v31-0002-Support-multiranges-in-temporal-FKs.patch test.\n\n\n[1 https://stackoverflow.com/questions/14921668/difference-between-restrict-and-no-action", "msg_date": "Thu, 21 Mar 2024 16:25:15 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 20.03.24 17:21, Paul Jungwirth wrote:\n> On 3/20/24 03:55, jian he wrote:\n>> hi.\n>> minor cosmetic issues, other than that, looks good.\n>>\n>> *pk_period = (indexStruct->indisexclusion);\n>> to\n>> *pk_period = indexStruct->indisexclusion;\n>>\n>> ... >\n>> if (with_period && !fkconstraint->fk_with_period)\n>> ereport(ERROR,\n>> (errcode(ERRCODE_INVALID_FOREIGN_KEY),\n>> errmsg(\"foreign key uses PERIOD on the referenced table but not the\n>> referencing table\")));\n> \n> Both included in the new patches here.\n> \n> Rebased to a0390f6ca6.\n\nTwo more questions:\n\n1. 
In ri_triggers.c ri_KeysEqual, you swap the order of arguments to \nri_AttributesEqual():\n\n- if (!ri_AttributesEqual(riinfo->ff_eq_oprs[i], \nRIAttType(rel, attnums[i]),\n- oldvalue, newvalue))\n+ if (!ri_AttributesEqual(eq_opr, RIAttType(rel, attnums[i]),\n+ newvalue, oldvalue))\n\nBut the declared arguments of ri_AttributesEqual() are oldvalue and \nnewvalue, so passing them backwards is really confusing. And the change \ndoes matter in the tests.\n\nCan we organize this better?\n\n2. There are some tests that error with\n\nERROR: only b-tree indexes are supported for non-PERIOD foreign keys\n\nBut this is an elog() error, so should not normally be visible. I \nsuspect some other error should really show here, and the order of \nchecks is a bit wrong or something?\n\n\n", "msg_date": "Thu, 21 Mar 2024 15:57:20 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "v32 attached.\n\nOn 3/21/24 07:57, Peter Eisentraut wrote:\n > Two more questions:\n >\n > 1. In ri_triggers.c ri_KeysEqual, you swap the order of arguments to ri_AttributesEqual():\n >\n > - if (!ri_AttributesEqual(riinfo->ff_eq_oprs[i], RIAttType(rel, attnums[i]),\n > - oldvalue, newvalue))\n > + if (!ri_AttributesEqual(eq_opr, RIAttType(rel, attnums[i]),\n > + newvalue, oldvalue))\n >\n > But the declared arguments of ri_AttributesEqual() are oldvalue and newvalue, so passing them\n > backwards is really confusing. And the change does matter in the tests.\n >\n > Can we organize this better?\n\nI renamed the params and actually the whole function. All it's doing is execute `oldvalue op \nnewvalue`, casting if necessary. So I changed it to ri_CompareWithCast and added some documentation. 
\nIn an earlier version of this patch I had a separate function for the PERIOD comparison, but it's \njust doing the same thing, so I think the best thing is to give the function a more accurate name \nand use it.\n\n > 2. There are some tests that error with\n >\n > ERROR: only b-tree indexes are supported for non-PERIOD foreign keys\n >\n > But this is an elog() error, so should not normally be visible. I suspect some other error should\n > really show here, and the order of checks is a bit wrong or something?\n\nAt first I thought I should just make this ereport, because it is reachable now, but I didn't like \nthe error message or where we were reaching it. The high-level problem is defining a non-temporal FK\nagainst a temporal PK, and we should check for that in those terms, not when looking at individual \nattribute opclasses. So I added a check prior to this and gave it a more descriptive error message.\n\nOn 3/21/24 01:25, jian he wrote:\n > with foreign key \"no action\",\n > in a transaction, we can first insert foreign key data, then primary key data.\n > also the update/delete can fail at the end of transaction.\n >\n > based on [1] explanation about the difference between \"no action\" and\n > \"restrict\".\n > I only refactor the v31-0002-Support-multiranges-in-temporal-FKs.patch test.\n\nI added some tests for deferred NO ACTION checks. I added them for all of range/multirange/PERIOD. I \nalso adopted your change ALTERing the constraint for NO ACTION (even though it's already that), to\nmake each test section more independent. Your patch had a lot of other noisy changes, e.g. \nwhitespace and reordering lines. 
If there are other things you intended to add to the tests, can you \ndescribe them?\n\nRebased to 7e65ad197f.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Thu, 21 Mar 2024 17:35:53 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Mar 22, 2024 at 8:35 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> Your patch had a lot of other noisy changes, e.g.\n> whitespace and reordering lines. If there are other things you intended to add to the tests, can you\n> describe them?\n\ni think on update restrict, on delete restrict cannot be deferred,\neven if you set it DEFERRABLE INITIALLY DEFERRED.\nbased on this idea, I made minor change on\nv32-0002-Support-multiranges-in-temporal-FKs.patch\n\nother than that, v32, 0002 looks good.", "msg_date": "Fri, 22 Mar 2024 13:33:37 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 22.03.24 01:35, Paul Jungwirth wrote:\n> > 1. In ri_triggers.c ri_KeysEqual, you swap the order of arguments to \n> ri_AttributesEqual():\n> >\n> > -           if (!ri_AttributesEqual(riinfo->ff_eq_oprs[i], \n> RIAttType(rel, attnums[i]),\n> > -                                   oldvalue, newvalue))\n> > +           if (!ri_AttributesEqual(eq_opr, RIAttType(rel, attnums[i]),\n> > +                                   newvalue, oldvalue))\n> >\n> > But the declared arguments of ri_AttributesEqual() are oldvalue and \n> newvalue, so passing them\n> > backwards is really confusing.  And the change does matter in the tests.\n> >\n> > Can we organize this better?\n> \n> I renamed the params and actually the whole function. All it's doing is \n> execute `oldvalue op newvalue`, casting if necessary. So I changed it to \n> ri_CompareWithCast and added some documentation. 
In an earlier version \n> of this patch I had a separate function for the PERIOD comparison, but \n> it's just doing the same thing, so I think the best thing is to give the \n> function a more accurate name and use it.\n\nOk, I see now, and the new explanation is better.\n\nBut after reading the comment in the function about collations, I think \nthere could be trouble. As long as we are only comparing for equality \n(and we don't support nondeterministic global collations), then we can \nuse any collation to compare for equality. But if we are doing \ncontained-by, then the collation does matter, so we would need to get \nthe actual collation somehow. So as written, this might not always work \ncorrectly.\n\nI think it would be safer for now if we just kept using the equality \noperation even for temporal foreign keys. If we did that, then in the \ncase that you update a key to a new value that is contained by the old \nvalue, this function would say \"not equal\" and fire all the checks, even \nthough it wouldn't need to. This is kind of similar to the \"false \nnegatives\" that the comment already talks about.\n\nWhat do you think?\n\n\n\n", "msg_date": "Fri, 22 Mar 2024 16:49:29 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Mar 22, 2024 at 11:49 PM Peter Eisentraut <peter@eisentraut.org> wrote:\n>\n> On 22.03.24 01:35, Paul Jungwirth wrote:\n> > > 1. In ri_triggers.c ri_KeysEqual, you swap the order of arguments to\n> > ri_AttributesEqual():\n> > >\n> > > - if (!ri_AttributesEqual(riinfo->ff_eq_oprs[i],\n> > RIAttType(rel, attnums[i]),\n> > > - oldvalue, newvalue))\n> > > + if (!ri_AttributesEqual(eq_opr, RIAttType(rel, attnums[i]),\n> > > + newvalue, oldvalue))\n> > >\n> > > But the declared arguments of ri_AttributesEqual() are oldvalue and\n> > newvalue, so passing them\n> > > backwards is really confusing. 
And the change does matter in the tests.\n> > >\n> > > Can we organize this better?\n> >\n> > I renamed the params and actually the whole function. All it's doing is\n> > execute `oldvalue op newvalue`, casting if necessary. So I changed it to\n> > ri_CompareWithCast and added some documentation. In an earlier version\n> > of this patch I had a separate function for the PERIOD comparison, but\n> > it's just doing the same thing, so I think the best thing is to give the\n> > function a more accurate name and use it.\n>\n> Ok, I see now, and the new explanation is better.\n>\n> But after reading the comment in the function about collations, I think\n> there could be trouble. As long as we are only comparing for equality\n> (and we don't support nondeterministic global collations), then we can\n> use any collation to compare for equality. But if we are doing\n> contained-by, then the collation does matter, so we would need to get\n> the actual collation somehow. So as written, this might not always work\n> correctly.\n>\n> I think it would be safer for now if we just kept using the equality\n> operation even for temporal foreign keys. If we did that, then in the\n> case that you update a key to a new value that is contained by the old\n> value, this function would say \"not equal\" and fire all the checks, even\n> though it wouldn't need to. 
This is kind of similar to the \"false\n> negatives\" that the comment already talks about.\n>\n> What do you think?\n>\n\nwe don't need to worry about primary key and foreign key with\ndifferent collation.\nbecause it will be error out as incompatible data type,\nforeign key constraint will not be created.\n\nif there are the same collation, when we build the query string, we\ndon't need to worry about collation.\nbecause at runtime, the operator associated oprcode\nwill fetch collation information later.\n\nmain operator and the main oprcode related to this patch(0001, 0002) are:\nrange_contained_by_multirange\nrange_eq\nrange_overlaps\nrange_contained_by\nthe first 3 functions will fetch collation information within range_cmp_bounds.\nrange_contained_by will fetch collation information in\nrange_contains_elem_internal.\n\ndemo:\nCREATE COLLATION case_insensitive (provider = icu, locale =\n'und-u-ks-level2', deterministic = false);\nDROP TABLE IF exists temporal_fk_rng2rng;\nDROP TABLE IF exists temporal_rng;\nDROP TYPE textrange_case_insensitive;\ncreate type textrange_case_insensitive as range(subtype=text,\ncollation=case_insensitive);\nCREATE TABLE temporal_rng (id int4range, valid_at textrange_case_insensitive);\nALTER TABLE temporal_rng\nADD CONSTRAINT temporal_rng_pk\nPRIMARY KEY (id, valid_at WITHOUT OVERLAPS);\nCREATE TABLE temporal_fk_rng2rng (\nid int4range,\nvalid_at textrange_case_insensitive,\nparent_id int4range,\nCONSTRAINT temporal_fk_rng2rng_fk2 FOREIGN KEY (parent_id, PERIOD valid_at)\nREFERENCES temporal_rng (id, PERIOD valid_at)\n);\nINSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)',\ntextrange_case_insensitive('c', 'h','[]'));\n\n--fail\nINSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id)\nVALUES ('[1,2)', textrange_case_insensitive('B', 'B','[]'), '[1,2)');\n\n--fail.\nINSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id)\nVALUES ('[1,2)', textrange_case_insensitive('a', 'F','[]'), '[1,2)');\n\n--fail.\nINSERT INTO 
temporal_fk_rng2rng (id, valid_at, parent_id)\nVALUES ('[1,2)', textrange_case_insensitive('e', 'Z','[]'), '[1,2)');\n\n--ok\nINSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id)\nVALUES ('[1,2)', textrange_case_insensitive('d', 'F','[]'), '[1,2)');\n\n\n", "msg_date": "Sat, 23 Mar 2024 11:02:28 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "v33 attached with minor changes.\n\nOn 3/22/24 20:02, jian he wrote:\n > On Fri, Mar 22, 2024 at 11:49 PM Peter Eisentraut <peter@eisentraut.org> wrote:\n >> But after reading the comment in the function about collations, I think\n >> there could be trouble. As long as we are only comparing for equality\n >> (and we don't support nondeterministic global collations), then we can\n >> use any collation to compare for equality. But if we are doing\n >> contained-by, then the collation does matter, so we would need to get\n >> the actual collation somehow. So as written, this might not always work\n >> correctly.\n >>\n >> I think it would be safer for now if we just kept using the equality\n >> operation even for temporal foreign keys. If we did that, then in the\n >> case that you update a key to a new value that is contained by the old\n >> value, this function would say \"not equal\" and fire all the checks, even\n >> though it wouldn't need to. This is kind of similar to the \"false\n >> negatives\" that the comment already talks about.\n >>\n >> What do you think?\n >>\n >\n > we don't need to worry about primary key and foreign key with\n > different collation.\n > because it will be error out as incompatible data type,\n > foreign key constraint will not be created.\n\nI agree with jian he here. Here is my own investigation:\n\nRangetypes themselves are never collatable (see DefineRange in commands/typecmds.c).\nBut rangetypes do store a collation for their base type. 
So you can say:\n\npaul=# create type textrange as range (subtype = text, collation = \"C\");\nCREATE TYPE\n\nThat is stored in pg_range.rngcollation, but pg_type.typcollation is always zero.\n\nSo putting a collection on a rangetype column is an error:\n\npaul=# create table t (r1 textrange collate \"en-US-x-icu\");\nERROR: collations are not supported by type textrange\n\nAnd so is using an ad hoc collation with an operator:\n\npaul=# select '[J,J]'::textrange <@ '[a,z]'::textrange collate \"en-US-x-icu\";\nERROR: collations are not supported by type textrange\nLINE 1: select '[J,J]'::textrange <@ '[a,z]'::textrange collate \"en-...\n\nAlmost everything ranges do is built on range_cmp_bounds, which uses the base type's collation. \nThere is no way to use a different one.\nSo when ri_CompareWithCast calls `lhs <@ rhs`, it is using the collation for that range's base type.\nIndexes will use the same collation.\n\nYou also can't mix different range types.\nOur textrange puts (English) lowercase after uppercase:\n\npaul=# select '[j,j]'::textrange <@ '[a,z]'::textrange;\n ?column?\n----------\n t\n(1 row)\n\npaul=# select '[J,J]'::textrange <@ '[a,z]'::textrange;\n ?column?\n----------\n f\n(1 row)\n\nWe could create a rangetype that intermingles uppercase & lower:\n\npaul=# create type itextrange as range (subtype = text, collation = \"en-US-x-icu\");\nCREATE TYPE\npaul=# select '[J,J]'::itextrange <@ '[a,z]'::itextrange;\n ?column?\n----------\n t\n(1 row)\n\nBut you can't mix them:\n\npaul=# select '[J,J]'::itextrange <@ '[a,z]'::textrange;\nERROR: operator does not exist: itextrange <@ textrange\nLINE 1: select '[J,J]'::itextrange <@ '[a,z]'::textrange;\n ^\nHINT: No operator matches the given name and argument types. 
You might need to add explicit type casts.\n\nEven if I create casts, mixing still fails:\n\npaul=# create cast (textrange as itextrange) without function as implicit;\nCREATE CAST\npaul=# create cast (itextrange as textrange) without function as implicit;\nCREATE CAST\npaul=# select '[J,J]'::itextrange <@ '[a,z]'::textrange;\nERROR: operator does not exist: itextrange <@ textrange\nLINE 1: select '[J,J]'::itextrange <@ '[a,z]'::textrange;\n ^\nHINT: No operator matches the given name and argument types. You might need to add explicit type casts.\n\nThat's because the operator parameters are anyrange, and in can_coerce_type we call \ncheck_generic_type_consistency which doesn't use casts.\nIt just asks if all the concrete range types are the same (as with other polymorphic types).\n\nAdding a foreign key runs the same check:\n\npaul=# create table pk (id int4range, valid_at textrange, constraint pkpk primary key (id, valid_at \nwithout overlaps));\nCREATE TABLE\npaul=# create table fk (id int4range, valid_at itextrange, parent_id int4range);\nCREATE TABLE\npaul=# alter table fk add constraint fkfk foreign key (parent_id, period valid_at) references pk;\nERROR: foreign key constraint \"fkfk\" cannot be implemented\nDETAIL: Key columns \"valid_at\" and \"valid_at\" are of incompatible types: itextrange and textrange.\n\nI guess the user could define their own `textrange <@ itextrange` operator, using the lhs collation.\nWe would choose that operator for pfeqop but not ppeqop or ffeqop.\nAnd we use ffeqop here, which would allow us to skip a check that pfeqop would fail.\nIs that an issue? 
It feels like the user is doing their best to get nonsense results at that point,\nand it's not really about the collation per se.\n\nIncidentally here is another separate issue with foreign keys and collations I noticed this morning: \nhttps://www.postgresql.org/message-id/78d824e0-b21e-480d-a252-e4b84bc2c24b%40illuminatedcomputing.com\nThat comes from nondeterministic collations, which feel like a troublesome thing here.\nProbably foreign keys just weren't fully re-thought when we added them.\n\nBut we avoid the issue from 59a85cb4 (discussion at \nhttps://www.postgresql.org/message-id/flat/3326fc2e-bc02-d4c5-e3e5-e54da466e89a@2ndquadrant.com) \nabout cascading changes when a PK experiences a not-binary-identical change that the collation \nconsiders equal. These days we only call ri_CompareWithCast for changes on the FK side.\n\nNow this is a long chain of reasoning to say rangetypes are safe. I added a comment. Note it doesn't \napply to arbitrary types, so if we support those eventually we should just require a recheck always, \nor alternately use equals, not containedby. (That would require storing equals somewhere. It could \ngo in ffeqop, but that feels like a footgun since pfeqop and ppeqop need overlaps.)\n\nOn 3/21/24 22:33, jian he wrote:\n > i think on update restrict, on delete restrict cannot be deferred,\n > even if you set it DEFERRABLE INITIALLY DEFERRED.\n > based on this idea, I made minor change on\n > v32-0002-Support-multiranges-in-temporal-FKs.patch\n\nOkay, added those tests too. Thanks!\n\nRebased to 697f8d266c.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Sat, 23 Mar 2024 10:42:47 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 23.03.24 18:42, Paul Jungwirth wrote:\n> Now this is a long chain of reasoning to say rangetypes are safe. I \n> added a comment. 
Note it doesn't apply to arbitrary types, so if we \n> support those eventually we should just require a recheck always, or \n> alternately use equals, not containedby. (That would require storing \n> equals somewhere. It could go in ffeqop, but that feels like a footgun \n> since pfeqop and ppeqop need overlaps.)\n\nOk, this explanation is good enough for now. I have committed the \npatches v33-0001-Add-temporal-FOREIGN-KEYs.patch and \nv33-0002-Support-multiranges-in-temporal-FKs.patch (together).\n\n\n\n", "msg_date": "Sun, 24 Mar 2024 08:38:08 +0100", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sun, Mar 24, 2024 at 1:42 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> v33 attached with minor changes.\n>\n> Okay, added those tests too. Thanks!\n>\n> Rebased to 697f8d266c.\n>\n\n\nhi.\nminor issues I found in v33-0003.\nthere are 29 of {check_amproc_signature?.*false}\nonly one {check_amproc_signature(procform->amproc, opcintype, true}\nis this refactoring really worth it?\n\nWe also need to refactor gistadjustmembers?\n\n\n+ <row>\n+ <entry><function>intersect</function></entry>\n+ <entry>computes intersection with <literal>FOR PORTION OF</literal>\n+ bounds</entry>\n+ <entry>13</entry>\n+ </row>\n+ <row>\n+ <entry><function>without_portion</function></entry>\n+ <entry>computes remaining duration(s) outside\n+ <literal>FOR PORTION OF</literal> bounds</entry>\n+ <entry>14</entry>\n+ </row>\nneeds to add \"(optional)\".\n\n\n+<programlisting>\n+Datum\n+my_range_intersect(PG_FUNCTION_ARGS)\n+{\n+ RangeType *r1 = PG_GETARG_RANGE_P(0);\n+ RangeType *r2 = PG_GETARG_RANGE_P(1);\n+ TypeCacheEntry *typcache;\n+\n+ /* Different types should be prevented by ANYRANGE matching rules */\n+ if (RangeTypeGetOid(r1) != RangeTypeGetOid(r2))\n elog(ERROR, \"range\ntypes do not match\");\n+\n+ typcache = range_get_typcache(fcinfo, RangeTypeGetOid(r1));\n+\n+ 
PG_RETURN_RANGE_P(range_intersect_internal(typcache, r1, r2));\n+}\n+</programlisting>\nthe elog, ERROR indentation is wrong?\n\n\n+/*\n+ * range_without_portion_internal - Sets outputs and outputn to the ranges\n+ * remaining and their count (respectively) after subtracting r2 from r1.\n+ * The array should never contain empty ranges.\n+ * The outputs will be ordered. We expect that outputs is an array of\n+ * RangeType pointers, already allocated with two slots.\n+ */\n+void\n+range_without_portion_internal(TypeCacheEntry *typcache, RangeType *r1,\n+ RangeType *r2, RangeType **outputs, int *outputn)\nthe comments need to be refactored?\nthere is nothing related to \"slot\"?\nnot sure the \"array\" description is right.\n(my understanding is compute rangetype r1 and r2, and save the result to\nRangeType **outputs.\n\n\nselect proisstrict, proname from pg_proc where proname =\n'range_without_portion';\nrange_without_portion is strict.\nbut\nselect range_without_portion(NULL::int4range, int4range(11, 20,'[]'));\nreturn zero rows.\nIs this the expected behavior?\n\n\n0003 seems simple enough.\nbut it's more related to \"for portion of\".\nnot sure we can push 0003 into v17.\n\n\n", "msg_date": "Tue, 26 Mar 2024 08:00:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 3/24/24 00:38, Peter Eisentraut wrote:> I have committed the patches\n> v33-0001-Add-temporal-FOREIGN-KEYs.patch and v33-0002-Support-multiranges-in-temporal-FKs.patch \n> (together).\n\nHi Hackers,\n\nI found some problems with temporal primary keys and the idea of uniqueness, especially around the \nindisunique column. Here are some small fixes and a proposal for a larger fix, which I think we need \nbut I'd like some feedback on.\n\nThe first patch fixes problems with ON CONFLICT DO NOTHING/UPDATE.\n\nDO NOTHING fails because it doesn't expect a non-btree unique index. 
It's fine to make it accept a \ntemporal PRIMARY KEY/UNIQUE index though (i.e. an index with both indisunique and indisexclusion).\nThis is no different than an exclusion constraint. So I skip BuildSpeculativeIndexInfo for WITHOUT \nOVERLAPS indexes. (Incidentally, AFAICT ii_UniqueOps is never used, only ii_UniqueProcs. Right?)\n\nWe should still forbid temporally-unique indexes for ON CONFLICT DO UPDATE, since there may be more \nthan one row that conflicts. Ideally we would find and update all the conflicting rows, but we don't \nnow, and we don't want to update just one:\n\n postgres=# create table t (id int4range, valid_at daterange, name text, constraint tpk primary \nkey (id, valid_at without overlaps));\n CREATE TABLE\n postgres=# insert into t values ('[1,2)', '[2000-01-01,2001-01-01)', 'a'), ('[1,2)', \n'[2001-01-01,2002-01-01)', 'b');\n INSERT 0 2\n postgres=# insert into t values ('[1,2)', '[2000-01-01,2002-01-01)', 'c') on conflict (id, \nvalid_at) do update set name = excluded.name;\n INSERT 0 1\n postgres=# select * from t;\n id | valid_at | name\n -------+-------------------------+------\n [1,2) | [2001-01-01,2002-01-01) | b\n [1,2) | [2000-01-01,2001-01-01) | c\n (2 rows)\n\nSo I also added code to prevent that. This is just preserving the old behavior for exclusion \nconstraints, which was bypassed because of indisunique. All this is in the first patch.\n\nThat got me thinking about indisunique and where else it could cause problems. Perhaps there are \nother places that assume only b-trees are unique. I couldn't find anywhere that just gives an error \nlike ON CONFLICT, but I can imagine more subtle problems.\n\nA temporal PRIMARY KEY or UNIQUE constraint is unique in at least three ways: It is *metaphorically* \nunique: the conceit is that the scalar part is unique at every moment in time. 
You may have id 5 in \nyour table more than once, as long as the records' application times don't overlap.\n\nAnd it is *officially* unique: the standard calls these constraints unique. I think it is correct \nfor us to report them as unique in pg_index.\n\nBut is it *literally* unique? Well two identical keys, e.g. (5, '[Jan24,Mar24)') and (5, \n'[Jan24,Mar24)'), do have overlapping ranges, so the second is excluded. Normally a temporal unique \nindex is *more* restrictive than a standard one, since it forbids other values too (e.g. (5, \n'[Jan24,Feb24)')). But sadly there is one exception: the ranges in these keys do not overlap: (5, \n'empty'), (5, 'empty'). With ranges/multiranges, `'empty' && x` is false for all x. You can add that \nkey as many times as you like, despite a PK/UQ constraint:\n\n postgres=# insert into t values\n ('[1,2)', 'empty', 'foo'),\n ('[1,2)', 'empty', 'bar');\n INSERT 0 2\n postgres=# select * from t;\n id | valid_at | name\n -------+----------+------\n [1,2) | empty | foo\n [1,2) | empty | bar\n (2 rows)\n\nCases like this shouldn't actually happen for temporal tables, since empty is not a meaningful \nvalue. An UPDATE/DELETE FOR PORTION OF would never cause an empty. But we should still make sure \nthey don't cause problems.\n\nOne place we should avoid temporally-unique indexes is REPLICA IDENTITY. Fortunately we already do \nthat, but patch 2 adds a test to keep it that way.\n\nUniqueness is an important property to the planner, too.\n\nWe consider indisunique often for estimates, where it needn't be 100% true. Even if there are \nnullable columns or a non-indimmediate index, it still gives useful stats. Duplicates from 'empty' \nshouldn't cause any new problems there.\n\nIn proof code we must be more careful. Patch 3 updates relation_has_unique_index_ext and \nrel_supports_distinctness to disqualify WITHOUT OVERLAPS indexes. Maybe that's more cautious than \nneeded, but better safe than sorry. This patch has no new test though. 
I had trouble writing SQL \nthat was wrong before its change. I'd be happy for help here!\n\nAnother problem is GROUP BY and functional dependencies. This is wrong:\n\n postgres=# create table a (id int4range, valid_at daterange, name text, constraint apk primary \nkey (id, valid_at without overlaps));\n CREATE TABLE\n postgres=# insert into a values ('[1,2)', 'empty', 'foo'), ('[1,2)', 'empty', 'bar');\n INSERT 0 2\n postgres=# select * from a group by id, valid_at;\n id | valid_at | name\n -------+----------+------\n [1,2) | empty | foo\n (1 row)\n\nOne fix is to return false from check_functional_grouping for WITHOUT OVERLAPS primary keys. But I \nthink there is a better fix that is less ad hoc.\n\nWe should give temporal primary keys an internal CHECK constraint saying `NOT isempty(valid_at)`. \nThe problem is analogous to NULLs in parts of a primary key. NULLs prevent two identical keys from \never comparing as equal. And just as a regular primary key cannot contain NULLs, so a temporal \nprimary key should not contain empties.\n\nThe standard effectively prevents this with PERIODs, because a PERIOD adds a constraint saying start \n< end. But our ranges enforce only start <= end. If you say `int4range(4,4)` you get `empty`. If we \nconstrain primary keys as I'm suggesting, then they are literally unique, and indisunique seems safer.\n\nShould we add the same CHECK constraint to temporal UNIQUE indexes? I'm inclined toward no, just as \nwe don't forbid NULLs in parts of a UNIQUE key. We should try to pick what gives users more options, \nwhen possible. Even if it is questionably meaningful, I can see use cases for allowing empty ranges \nin a temporal table. For example it lets you \"disable\" a row, preserving its values but marking it \nas never true.\n\nAlso it gives you a way to make a non-temporal foreign key reference to a temporal table. Normally \ntemporal tables are \"contagious\", which is annoying. 
But if the referencing table had 'empty' for \nits temporal part, then references should succeed. For example this is true: 'empty'::daterange <@ \n'[2000-01-01,2001-01-01)'. (Technically this would require a small change to our FK SQL, because we \ndo `pkperiod && fkperiod` as an optimization (to use the index more fully), and we would need to \nskip that when fkperiod is empty.)\n\nFinally, if we have a not-empty constraint on our primary keys, then the GROUP BY problem above goes \naway. And we can still use temporal primary keys in proofs (but maybe not other temporally-unique \nindexes). We can allow them in relation_has_unique_index_ext/rel_supports_distinctness.\n\nThe drawback to putting a CHECK constraint on just PKs and not UNIQUEs is that indisunique may not \nbe literally unique for them, if they have empty ranges. But even for traditional UNIQUE \nconstraints, indisunique can be misleading: If they have nullable parts, identical keys are still \n\"unique\", so the code is already careful about them. Do note though the problems come from 'empty' \nvalues, not nullable values, so there might still be some planner rules we need to correct.\n\nAnother drawback is that by using isempty we're limiting temporal PKs to just ranges and \nmultiranges, whereas currently any type with appropriate operators is allowed. But since we decided \nto limit FKs already, I think this is okay. We can open it back up again later if we like (e.g. by \nadding a support function for the isempty concept).\n\nI'll start working on a patch for this too, but I'd be happy for early feedback/objections/etc.\n\nI guess an alternative would be to add a new operator, say &&&, that is the same as overlaps, except \n'empty' overlaps everything instead of nothing. In a way that seems more consistent with <@. (How \ncan a range contain something if it doesn't overlap it?) 
I don't love that a key like (5, 'empty') \nwould conflict with every other 5, but you as I said it's not a meaningful value in a temporal table \nanyway. Or you could have 'empty' overlap nothing except itself. Maybe I prefer this solution to an \ninternal CHECK constraint, but it feels like it has more unknown unknowns. Thoughts?\n\nAlso I suspect there are still places where indisunique causes problems. I'll keep looking for them, \nbut if others have thoughts please let me know.\n\nPatches here are generated against c627d944e6.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Tue, 2 Apr 2024 22:30:20 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, Apr 3, 2024 at 1:30 PM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 3/24/24 00:38, Peter Eisentraut wrote:> I have committed the patches\n> > v33-0001-Add-temporal-FOREIGN-KEYs.patch and v33-0002-Support-multiranges-in-temporal-FKs.patch\n> > (together).\n>\n> Hi Hackers,\n>\n> I found some problems with temporal primary keys and the idea of uniqueness, especially around the\n> indisunique column. Here are some small fixes and a proposal for a larger fix, which I think we need\n> but I'd like some feedback on.\n>\n> The first patch fixes problems with ON CONFLICT DO NOTHING/UPDATE.\n>\n> DO NOTHING fails because it doesn't expect a non-btree unique index. It's fine to make it accept a\n> temporal PRIMARY KEY/UNIQUE index though (i.e. an index with both indisunique and indisexclusion).\n> This is no different than an exclusion constraint. So I skip BuildSpeculativeIndexInfo for WITHOUT\n> OVERLAPS indexes. (Incidentally, AFAICT ii_UniqueOps is never used, only ii_UniqueProcs. 
Right?)\n>\n\nhi.\nfor unique index, primary key:\nii_ExclusionOps, ii_UniqueOps is enough to distinguish this index\nsupport without overlaps,\nwe don't need another ii_HasWithoutOverlaps?\n(i didn't test it though)\n\n\nON CONFLICT DO NOTHING\nON CONFLICT (id, valid_at) DO NOTHING\nON CONFLICT ON CONSTRAINT temporal_rng_pk DO NOTHING\nI am confused by the test.\nhere temporal_rng only has one primary key, ON CONFLICT only deals with it.\nI thought these three are the same thing?\n\n\nDROP TABLE temporal_rng;\nCREATE TABLE temporal_rng (id int4range,valid_at daterange);\nALTER TABLE temporal_rng ADD CONSTRAINT temporal_rng_pk PRIMARY KEY\n(id, valid_at WITHOUT OVERLAPS);\n\n+-- ON CONFLICT\n+--\n+TRUNCATE temporal_rng;\n+INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)',\ndaterange('2000-01-01', '2010-01-01'));\n+-- with a conflict\n+INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)',\ndaterange('2005-01-01', '2006-01-01')) ON CONFLICT DO NOTHING;\n+-- id matches but no conflict\n\n+TRUNCATE temporal_rng;\n+INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)',\ndaterange('2000-01-01', '2010-01-01'));\n+-- with a conflict\n+INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)',\ndaterange('2005-01-01', '2006-01-01')) ON CONFLICT (id, valid_at) DO\nNOTHING;\n+ERROR: there is no unique or exclusion constraint matching the ON\nCONFLICT specification\n\n+TRUNCATE temporal_rng;\n+INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)',\ndaterange('2000-01-01', '2010-01-01'));\n+-- with a conflict\n+INSERT INTO temporal_rng (id, valid_at) VALUES ('[1,2)',\ndaterange('2005-01-01', '2006-01-01')) ON CONFLICT ON CONSTRAINT\ntemporal_rng_pk DO NOTHING;\n\n\n", "msg_date": "Mon, 15 Apr 2024 08:00:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, Apr 3, 2024 at 1:30 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n> I found some problems with 
temporal primary keys and the idea of uniqueness, especially around the\n> indisunique column. Here are some small fixes and a proposal for a larger fix, which I think we need\n> but I'd like some feedback on.\n\nI think this thread should be added to the open items list. You're\nraising questions about whether the feature that was committed to this\nrelease is fully correct. If it isn't, we shouldn't release it without\nfixing it.\n\nhttps://wiki.postgresql.org/wiki/PostgreSQL_17_Open_Items\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 26 Apr 2024 15:25:26 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 4/26/24 12:25, Robert Haas wrote:\n> I think this thread should be added to the open items list.\n\nThanks! I sent a request to pgsql-www to get edit permission. I didn't realize there was a wiki page \ntracking things like this. I agree it needs to be fixed if we want to include the feature.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Fri, 26 Apr 2024 12:41:55 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Apr 26, 2024 at 3:41 PM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n> On 4/26/24 12:25, Robert Haas wrote:\n> > I think this thread should be added to the open items list.\n>\n> Thanks! I sent a request to pgsql-www to get edit permission. I didn't realize there was a wiki page\n> tracking things like this. 
I agree it needs to be fixed if we want to include the feature.\n\nGreat, I see that it's on the list now.\n\nPeter, could you have a look at\nhttp://postgr.es/m/47550967-260b-4180-9791-b224859fe63e@illuminatedcomputing.com\nand express an opinion about whether each of those proposals are (a)\ngood or bad ideas and (b) whether they need to be fixed for the\ncurrent release?\n\nThanks,\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 30 Apr 2024 12:24:54 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 4/30/24 09:24, Robert Haas wrote:\n> Peter, could you have a look at\n> http://postgr.es/m/47550967-260b-4180-9791-b224859fe63e@illuminatedcomputing.com\n> and express an opinion about whether each of those proposals are (a)\n> good or bad ideas and (b) whether they need to be fixed for the\n> current release?\n\nHere are the same patches but rebased. I've added a fourth which is my progress on adding the CHECK \nconstraint. I don't really consider it finished though, because it has these problems:\n\n- The CHECK constraint should be marked as an internal dependency of the PK, so that you can't drop \nit, and it gets dropped when you drop the PK. I don't see a good way to tie the two together though, \nso I'd appreciate any advice there. They are separate AlterTableCmds, so how do I get the \nObjectAddress of both constraints at the same time? I wanted to store the PK's ObjectAddress on the \nConstraint node, but since ObjectAddress isn't a Node it doesn't work.\n\n- The CHECK constraint should maybe be hidden when you say `\\d foo`? Or maybe not, but that's what \nwe do with FK triggers.\n\n- When you create partitions you get a warning about the constraint already existing, because it \ngets created via the PK and then also the partitioning code tries to copy it. 
Solving the first \nissue here should solve this nicely though.\n\nAlternately we could just fix the GROUP BY functional dependency code to only accept b-tree indexes. \nBut I think the CHECK constraint approach is a better solution.\n\nThanks,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Tue, 30 Apr 2024 09:39:24 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, May 1, 2024 at 12:39 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 4/30/24 09:24, Robert Haas wrote:\n> > Peter, could you have a look at\n> > http://postgr.es/m/47550967-260b-4180-9791-b224859fe63e@illuminatedcomputing.com\n> > and express an opinion about whether each of those proposals are (a)\n> > good or bad ideas and (b) whether they need to be fixed for the\n> > current release?\n>\n> Here are the same patches but rebased. I've added a fourth which is my progress on adding the CHECK\n> constraint. I don't really consider it finished though, because it has these problems:\n>\n> - The CHECK constraint should be marked as an internal dependency of the PK, so that you can't drop\n> it, and it gets dropped when you drop the PK. I don't see a good way to tie the two together though,\n> so I'd appreciate any advice there. They are separate AlterTableCmds, so how do I get the\n> ObjectAddress of both constraints at the same time? I wanted to store the PK's ObjectAddress on the\n> Constraint node, but since ObjectAddress isn't a Node it doesn't work.\n>\n> - The CHECK constraint should maybe be hidden when you say `\\d foo`? Or maybe not, but that's what\n> we do with FK triggers.\n>\n> - When you create partitions you get a warning about the constraint already existing, because it\n> gets created via the PK and then also the partitioning code tries to copy it. 
Solving the first\n> issue here should solve this nicely though.\n>\n> Alternately we could just fix the GROUP BY functional dependency code to only accept b-tree indexes.\n> But I think the CHECK constraint approach is a better solution.\n>\n\nI will consider these issues later.\nThe following are general ideas after applying your patches.\n\nCREATE TABLE temporal_rng1(\nid int4range,\nvalid_at daterange,\nCONSTRAINT temporal_rng1_pk unique (id, valid_at WITHOUT OVERLAPS)\n);\ninsert into temporal_rng1(id, valid_at) values (int4range '[1,1]',\n'empty'::daterange), ('[1,1]', 'empty');\ntable temporal_rng1;\n id | valid_at\n-------+----------\n [1,2) | empty\n [1,2) | empty\n(2 rows)\n\ni hope i didn't miss something:\nexclude the 'empty' special value, WITHOUT OVERLAP constraint will be\nunique and is more restrictive?\n\nif so,\nthen adding a check constraint to make the WITHOUT OVERLAP not include\nthe special value 'empty'\nis better than\nwriting a doc explaining that on some special occasion, a unique\nconstraint is not meant to be unique\n?\n\nin here\nhttps://www.postgresql.org/docs/devel/ddl-constraints.html#DDL-CONSTRAINTS-UNIQUE-CONSTRAINTS\nsays:\n<<\nUnique constraints ensure that the data contained in a column, or a\ngroup of columns, is unique among all the rows in the table.\n<<\n\n+ /*\n+ * The WITHOUT OVERLAPS part (if any) must be\n+ * a range or multirange type.\n+ */\n+ if (constraint->without_overlaps && lc == list_last_cell(constraint->keys))\n+ {\n+ Oid typid = InvalidOid;\n+\n+ if (!found && cxt->isalter)\n+ {\n+ /*\n+ * Look up the column type on existing table.\n+ * If we can't find it, let things fail in DefineIndex.\n+ */\n+ Relation rel = cxt->rel;\n+ for (int i = 0; i < rel->rd_att->natts; i++)\n+ {\n+ Form_pg_attribute attr = TupleDescAttr(rel->rd_att, i);\n+ const char *attname;\n+\n+ if (attr->attisdropped)\n+ break;\n+\n+ attname = NameStr(attr->attname);\n+ if (strcmp(attname, key) == 0)\n+ {\n+ typid = attr->atttypid;\n+ 
break;\n+ }\n+ }\n+ }\n+ else\n+ typid = typenameTypeId(NULL, column->typeName);\n+\n+ if (OidIsValid(typid) && !type_is_range(typid) && !type_is_multirange(typid))\n+ ereport(ERROR,\n+ (errcode(ERRCODE_DATATYPE_MISMATCH),\n+ errmsg(\"column \\\"%s\\\" in WITHOUT OVERLAPS is not a range or\nmultirange type\", key),\n+ parser_errposition(cxt->pstate, constraint->location)));\n+ }\n\n+ if (attr->attisdropped)\n+ break;\nit will break the loop?\nbut here you want to continue the loop?\n\n+ if (OidIsValid(typid) && !type_is_range(typid) && !type_is_multirange(typid))\ndidn't consider the case where typid is InvalidOid,\nmaybe we can simplify to\n+ if (!type_is_range(typid) && !type_is_multirange(typid))\n\n\n+ notnullcmds = lappend(notnullcmds, notemptycmd);\nseems weird.\nwe can imitate notnullcmds related logic for notemptycmd,\nnot associated notnullcmds in any way.\n\n\n", "msg_date": "Wed, 1 May 2024 19:27:56 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, May 1, 2024 at 12:39 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 4/30/24 09:24, Robert Haas wrote:\n> > Peter, could you have a look at\n> > http://postgr.es/m/47550967-260b-4180-9791-b224859fe63e@illuminatedcomputing.com\n> > and express an opinion about whether each of those proposals are (a)\n> > good or bad ideas and (b) whether they need to be fixed for the\n> > current release?\n>\n> Here are the same patches but rebased. I've added a fourth which is my progress on adding the CHECK\n> constraint. I don't really consider it finished though, because it has these problems:\n>\n> - The CHECK constraint should be marked as an internal dependency of the PK, so that you can't drop\n> it, and it gets dropped when you drop the PK. I don't see a good way to tie the two together though,\n> so I'd appreciate any advice there. 
They are separate AlterTableCmds, so how do I get the\n> ObjectAddress of both constraints at the same time? I wanted to store the PK's ObjectAddress on the\n> Constraint node, but since ObjectAddress isn't a Node it doesn't work.\n>\n\nhi.\nI hope I understand the problem correctly.\nmy understanding is that we are trying to solve a corner case:\ncreate table t(a int4range, b int4range, primary key(a, b WITHOUT OVERLAPS));\ninsert into t values ('[1,2]','empty'), ('[1,2]','empty');\n\n\nI think the entry point is ATAddCheckNNConstraint and index_create.\nin a chain of DDL commands, you cannot be sure which one\n(primary key constraint or check constraint) is being created first,\nyou just want to make sure that after both constraints are created,\nthen add a dependency between primary key and check constraint.\n\nso you need to validate at different functions\n(ATAddCheckNNConstraint, index_create)\nthat these two constraints are indeed created,\nonly after that we have a dependency linking these two constraints.\n\n\nI've attached a patch trying to solve this problem.\nthe patch is not totally polished, but works as expected, and also has\nlots of comments.", "msg_date": "Mon, 6 May 2024 11:01:30 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 30.04.24 18:39, Paul Jungwirth wrote:\n> On 4/30/24 09:24, Robert Haas wrote:\n>> Peter, could you have a look at\n>> http://postgr.es/m/47550967-260b-4180-9791-b224859fe63e@illuminatedcomputing.com\n>> and express an opinion about whether each of those proposals are (a)\n>> good or bad ideas and (b) whether they need to be fixed for the\n>> current release?\n> \n> Here are the same patches but rebased.\n\nI have committed v2-0002-Add-test-for-REPLICA-IDENTITY-with-a-temporal-key.patch.\n\nAbout v2-0001-Fix-ON-CONFLICT-DO-NOTHING-UPDATE-for-temporal-in.patch, I think the\nideas are right, but I wonder if we can 
fine-tune the new conditionals a bit.\n\n--- a/src/backend/executor/execIndexing.c\n+++ b/src/backend/executor/execIndexing.c\n@@ -210,7 +210,7 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)\n * If the indexes are to be used for speculative insertion, add extra\n * information required by unique index entries.\n */\n- if (speculative && ii->ii_Unique)\n+ if (speculative && ii->ii_Unique && !ii->ii_HasWithoutOverlaps)\n BuildSpeculativeIndexInfo(indexDesc, ii);\n\nHere, I think we could check !indexDesc->rd_index->indisexclusion instead. So we\nwouldn't need ii_HasWithoutOverlaps.\n\nOr we could push this into BuildSpeculativeIndexInfo(); it could just skip the rest\nif an exclusion constraint is passed, on the theory that all the speculative index\ninfo is already present in that case.\n\n--- a/src/backend/optimizer/util/plancat.c\n+++ b/src/backend/optimizer/util/plancat.c\n@@ -815,7 +815,7 @@ infer_arbiter_indexes(PlannerInfo *root)\n */\n if (indexOidFromConstraint == idxForm->indexrelid)\n {\n- if (!idxForm->indisunique && onconflict->action == ONCONFLICT_UPDATE)\n+ if ((!idxForm->indisunique || idxForm->indisexclusion) && onconflict->action == ONCONFLICT_UPDATE)\n ereport(ERROR,\n (errcode(ERRCODE_WRONG_OBJECT_TYPE),\n errmsg(\"ON CONFLICT DO UPDATE not supported with exclusion constraints\")));\n\nShouldn't this use only idxForm->indisexclusion anyway? Like\n\n+ if (idxForm->indisexclusion && onconflict->action == ONCONFLICT_UPDATE)\n\nThat matches what the error message is reporting afterwards.\n\n * constraints), so index under consideration can be immediately\n * skipped if it's not unique\n */\n- if (!idxForm->indisunique)\n+ if (!idxForm->indisunique || idxForm->indisexclusion)\n goto next;\n\nMaybe here we need a comment. Or make that a separate statement, like\n\n /* not supported yet etc. 
*/\n if (idxForm->indixexclusion)\n next;\n\n\n\n", "msg_date": "Wed, 8 May 2024 15:51:28 +0200", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Here are a couple new patches, rebased to e305f715, addressing Peter's feedback. I'm still working \non integrating jian he's suggestions for the last patch, so I've omitted that one here.\n\nOn 5/8/24 06:51, Peter Eisentraut wrote:\n> About v2-0001-Fix-ON-CONFLICT-DO-NOTHING-UPDATE-for-temporal-in.patch, I think the\n> ideas are right, but I wonder if we can fine-tune the new conditionals a bit.\n> \n> --- a/src/backend/executor/execIndexing.c\n> +++ b/src/backend/executor/execIndexing.c\n> @@ -210,7 +210,7 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo, bool speculative)\n>                  * If the indexes are to be used for speculative insertion, add extra\n>                  * information required by unique index entries.\n>                  */\n> -               if (speculative && ii->ii_Unique)\n> +               if (speculative && ii->ii_Unique && !ii->ii_HasWithoutOverlaps)\n>                         BuildSpeculativeIndexInfo(indexDesc, ii);\n> \n> Here, I think we could check !indexDesc->rd_index->indisexclusion instead.  So we\n> wouldn't need ii_HasWithoutOverlaps.\n\nOkay.\n\n> Or we could push this into BuildSpeculativeIndexInfo(); it could just skip the rest\n> if an exclusion constraint is passed, on the theory that all the speculative index\n> info is already present in that case.\n\nI like how BuildSpeculativeIndexInfo starts with an Assert that it's given a unique index, so I've \nleft the check outside the function. 
This seems cleaner anyway: the function stays more focused.\n\n> --- a/src/backend/optimizer/util/plancat.c\n> +++ b/src/backend/optimizer/util/plancat.c\n> @@ -815,7 +815,7 @@ infer_arbiter_indexes(PlannerInfo *root)\n>          */\n>         if (indexOidFromConstraint == idxForm->indexrelid)\n>         {\n> -           if (!idxForm->indisunique && onconflict->action == ONCONFLICT_UPDATE)\n> +           if ((!idxForm->indisunique || idxForm->indisexclusion) && onconflict->action == \n> ONCONFLICT_UPDATE)\n>                 ereport(ERROR,\n>                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),\n>                          errmsg(\"ON CONFLICT DO UPDATE not supported with exclusion constraints\")));\n> \n> Shouldn't this use only idxForm->indisexclusion anyway?  Like\n> \n> +           if (idxForm->indisexclusion && onconflict->action == ONCONFLICT_UPDATE)\n> \n> That matches what the error message is reporting afterwards.\n\nAgreed.\n\n>          * constraints), so index under consideration can be immediately\n>          * skipped if it's not unique\n>          */\n> -       if (!idxForm->indisunique)\n> +       if (!idxForm->indisunique || idxForm->indisexclusion)\n>             goto next;\n> \n> Maybe here we need a comment.  Or make that a separate statement, like\n\nYes, that is nice. Done.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Wed, 8 May 2024 21:24:09 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Hi,\n\nI haven't really been following this thread, but after playing around\na bit with the feature I feel there are new gaps in error messages. 
I\nalso think there are gaps in the functionality regarding the (lack of)\nsupport for CREATE UNIQUE INDEX, and attaching these indexes to\nconstraints.\n\npg=# CREATE TABLE temporal_testing (\npg(# id bigint NOT NULL\npg(# generated always as identity,\npg(# valid_during tstzrange\npg(# );\nCREATE TABLE\npg=# ALTER TABLE temporal_testing\npg-# ADD CONSTRAINT temp_unique UNIQUE (id, valid_during WITHOUT OVERLAPS);\nALTER TABLE\npg=# \\d+ temp_unique\n Index \"public.temp_unique\"\n Column | Type | Key? | Definition | Storage | Stats target\n--------------+-------------+------+--------------+----------+--------------\n id | gbtreekey16 | yes | id | plain |\n valid_during | tstzrange | yes | valid_during | extended |\nunique, gist, for table \"public.temporal_testing\"\n-- ^^ note the \"unique, gist\"\npg=# CREATE UNIQUE INDEX ON temporal_testing USING gist (id, valid_during);\nERROR: access method \"gist\" does not support unique indexes\n\nHere we obviously have a unique GIST index in the catalogs, but\nthey're \"not supported\" by GIST when we try to create such index\nourselves (!). Either the error message needs updating, or we need to\nhave a facility to actually support creating these unique indexes\noutside constraints.\n\nAdditionally, because I can't create my own non-constraint-backing\nunique GIST indexes, I can't pre-create my unique constraints\nCONCURRENTLY as one could do for the non-temporal case: UNIQUE\nconstraints hold ownership of the index and would drop the index if\nthe constraint is dropped, too, and don't support a CONCURRENTLY\nmodifier, nor an INVALID modifier. 
This means temporal unique\nconstraints have much less administrative wiggle room than normal\nunique constraints, and I think that's not great.\n\nKind regards,\n\nMatthias van de Meent.\n\n\n", "msg_date": "Fri, 10 May 2024 02:44:08 +0200", "msg_from": "Matthias van de Meent <boekewurm+postgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "I have committed the \nv2-0001-Fix-ON-CONFLICT-DO-NOTHING-UPDATE-for-temporal-in.patch from \nthis (confusingly, there was also a v2 earlier in this thread), and I'll \ncontinue working on the remaining items.\n\n\nOn 09.05.24 06:24, Paul Jungwirth wrote:\n> Here are a couple new patches, rebased to e305f715, addressing Peter's \n> feedback. I'm still working on integrating jian he's suggestions for the \n> last patch, so I've omitted that one here.\n> \n> On 5/8/24 06:51, Peter Eisentraut wrote:\n>> About v2-0001-Fix-ON-CONFLICT-DO-NOTHING-UPDATE-for-temporal-in.patch, \n>> I think the\n>> ideas are right, but I wonder if we can fine-tune the new conditionals \n>> a bit.\n>>\n>> --- a/src/backend/executor/execIndexing.c\n>> +++ b/src/backend/executor/execIndexing.c\n>> @@ -210,7 +210,7 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo, bool \n>> speculative)\n>>                   * If the indexes are to be used for speculative \n>> insertion, add extra\n>>                   * information required by unique index entries.\n>>                   */\n>> -               if (speculative && ii->ii_Unique)\n>> +               if (speculative && ii->ii_Unique && \n>> !ii->ii_HasWithoutOverlaps)\n>>                          BuildSpeculativeIndexInfo(indexDesc, ii);\n>>\n>> Here, I think we could check !indexDesc->rd_index->indisexclusion \n>> instead.  
So we\n>> wouldn't need ii_HasWithoutOverlaps.\n> \n> Okay.\n> \n>> Or we could push this into BuildSpeculativeIndexInfo(); it could just \n>> skip the rest\n>> if an exclusion constraint is passed, on the theory that all the \n>> speculative index\n>> info is already present in that case.\n> \n> I like how BuildSpeculativeIndexInfo starts with an Assert that it's \n> given a unique index, so I've left the check outside the function. This \n> seems cleaner anyway: the function stays more focused.\n> \n>> --- a/src/backend/optimizer/util/plancat.c\n>> +++ b/src/backend/optimizer/util/plancat.c\n>> @@ -815,7 +815,7 @@ infer_arbiter_indexes(PlannerInfo *root)\n>>           */\n>>          if (indexOidFromConstraint == idxForm->indexrelid)\n>>          {\n>> -           if (!idxForm->indisunique && onconflict->action == \n>> ONCONFLICT_UPDATE)\n>> +           if ((!idxForm->indisunique || idxForm->indisexclusion) && \n>> onconflict->action == ONCONFLICT_UPDATE)\n>>                  ereport(ERROR,\n>>                          (errcode(ERRCODE_WRONG_OBJECT_TYPE),\n>>                           errmsg(\"ON CONFLICT DO UPDATE not supported \n>> with exclusion constraints\")));\n>>\n>> Shouldn't this use only idxForm->indisexclusion anyway?  Like\n>>\n>> +           if (idxForm->indisexclusion && onconflict->action == \n>> ONCONFLICT_UPDATE)\n>>\n>> That matches what the error message is reporting afterwards.\n> \n> Agreed.\n> \n>>           * constraints), so index under consideration can be immediately\n>>           * skipped if it's not unique\n>>           */\n>> -       if (!idxForm->indisunique)\n>> +       if (!idxForm->indisunique || idxForm->indisexclusion)\n>>              goto next;\n>>\n>> Maybe here we need a comment.  Or make that a separate statement, like\n> \n> Yes, that is nice. 
Done.\n> \n> Yours,\n> \n\n\n\n", "msg_date": "Fri, 10 May 2024 15:25:46 +0200", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Mon, May 6, 2024 at 11:01 AM jian he <jian.universality@gmail.com> wrote:\n>\n> On Wed, May 1, 2024 at 12:39 AM Paul Jungwirth\n> <pj@illuminatedcomputing.com> wrote:\n> >\n> > On 4/30/24 09:24, Robert Haas wrote:\n> > > Peter, could you have a look at\n> > > http://postgr.es/m/47550967-260b-4180-9791-b224859fe63e@illuminatedcomputing.com\n> > > and express an opinion about whether each of those proposals are (a)\n> > > good or bad ideas and (b) whether they need to be fixed for the\n> > > current release?\n> >\n> > Here are the same patches but rebased. I've added a fourth which is my progress on adding the CHECK\n> > constraint. I don't really consider it finished though, because it has these problems:\n> >\n> > - The CHECK constraint should be marked as an internal dependency of the PK, so that you can't drop\n> > it, and it gets dropped when you drop the PK. I don't see a good way to tie the two together though,\n> > so I'd appreciate any advice there. They are separate AlterTableCmds, so how do I get the\n> > ObjectAddress of both constraints at the same time? 
I wanted to store the PK's ObjectAddress on the\n> > Constraint node, but since ObjectAddress isn't a Node it doesn't work.\n> >\n>\n> hi.\n> I hope I understand the problem correctly.\n> my understanding is that we are trying to solve a corner case:\n> create table t(a int4range, b int4range, primary key(a, b WITHOUT OVERLAPS));\n> insert into t values ('[1,2]','empty'), ('[1,2]','empty');\n>\n\n\nbut we still not yet address for cases like:\ncreate table t10(a int4range, b int4range, unique (a, b WITHOUT OVERLAPS));\ninsert into t10 values ('[1,2]','empty'), ('[1,2]','empty');\n\none table can have more than one temporal unique constraint,\nfor each temporal unique constraint adding a check isempty constraint\nseems not easy.\n\nfor example:\nCREATE TABLE t (\nid int4range,\nvalid_at daterange,\nparent_id int4range,\nCONSTRAINT t1 unique (id, valid_at WITHOUT OVERLAPS),\nCONSTRAINT t2 unique (parent_id, valid_at WITHOUT OVERLAPS),\nCONSTRAINT t3 unique (valid_at, id WITHOUT OVERLAPS),\nCONSTRAINT t4 unique (parent_id, id WITHOUT OVERLAPS),\nCONSTRAINT t5 unique (id, parent_id WITHOUT OVERLAPS),\nCONSTRAINT t6 unique (valid_at, parent_id WITHOUT OVERLAPS)\n);\nadd 6 check isempty constraints for table \"t\" is challenging.\n\nso far, I see the challenging part:\n* alter table alter column data type does not drop previous check\nisempty constraint, and will also add a check isempty constraint,\nso overall it will add more check constraints.\n* adding more check constraints needs a way to resolve naming collisions.\n\nMaybe we can just mention that the special 'empty' range value makes\ntemporal unique constraints not \"unique\".\n\nalso we can make sure that\nFOREIGN KEY can only reference primary keys, not unique temporal constraints.\nso the unique temporal constraints not \"unique\" implication is limited.\nI played around with it, we can error out these cases in the function\ntransformFkeyCheckAttrs.\n\n\n", "msg_date": "Sun, 12 May 2024 08:00:00 +0800", 
"msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 5/11/24 17:00, jian he wrote:\n>> I hope I understand the problem correctly.\n>> my understanding is that we are trying to solve a corner case:\n>> create table t(a int4range, b int4range, primary key(a, b WITHOUT OVERLAPS));\n>> insert into t values ('[1,2]','empty'), ('[1,2]','empty');\n>>\n> \n> \n> but we still not yet address for cases like:\n> create table t10(a int4range, b int4range, unique (a, b WITHOUT OVERLAPS));\n> insert into t10 values ('[1,2]','empty'), ('[1,2]','empty');\n> \n> one table can have more than one temporal unique constraint,\n> for each temporal unique constraint adding a check isempty constraint\n> seems not easy.\n\nI think we should add the not-empty constraint only for PRIMARY KEYs, not all UNIQUE constraints. \nThe empty edge case is very similar to the NULL edge case, and while every PK column must be \nnon-null, we do allow nulls in ordinary UNIQUE constraints. If users want to have 'empty' in those \nconstraints, I think we should let them. And then the problems you give don't arise.\n\n> Maybe we can just mention that the special 'empty' range value makes\n> temporal unique constraints not \"unique\".\n\nJust documenting the behavior is also an okay solution here I think. I see two downsides though: (1) \nit makes rangetype temporal keys differ from PERIOD temporal keys (2) it could allow more \nplanner/etc bugs than we have thought of. So I think it's worth adding the constraint instead.\n\n> also we can make sure that\n> FOREIGN KEY can only reference primary keys, not unique temporal constraints.\n> so the unique temporal constraints not \"unique\" implication is limited.\n> I played around with it, we can error out these cases in the function\n> transformFkeyCheckAttrs.\n\nI don't think it is a problem to reference a temporal UNIQUE constraint, even if it contains empty \nvalues. 
An empty value means you're not asserting that row at any time (though another row might \nassert the same thing for some time), so it could never contribute toward fulfilling a reference anyway.\n\nI do think it would be nice if the *reference* could contain empty values. Right now the FK SQL will \ncause that to never match, because we use `&&` as an optimization, but we could tweak the SQL (maybe \nfor v18 instead) so that users could get away with that kind of thing. As I said in an earlier \nemail, this would be you an escape hatch to reference a temporal table from a non-temporal table. \nOtherwise temporal tables are \"contagious,\" which is a bit of a drawback.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Sat, 11 May 2024 20:25:45 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 5/9/24 17:44, Matthias van de Meent wrote:\n> I haven't really been following this thread, but after playing around\n> a bit with the feature I feel there are new gaps in error messages. I\n> also think there are gaps in the functionality regarding the (lack of)\n> support for CREATE UNIQUE INDEX, and attaching these indexes to\n> constraints\nThank you for trying this out and sharing your thoughts! I think these are good points about CREATE \nUNIQUE INDEX and then creating the constraint by handing it an existing index. This is something \nthat I am hoping to add, but it's not covered by the SQL:2011 standard, so I think it needs some \ndiscussion, and I don't think it needs to go into v17.\n\nFor instance you are saying:\n\n > pg=# CREATE UNIQUE INDEX ON temporal_testing USING gist (id, valid_during);\n > ERROR: access method \"gist\" does not support unique indexes\n\nTo me that error message seems correct. The programmer hasn't said anything about the special \ntemporal behavior they are looking for. 
To get non-overlapping semantics from an index, this more \nexplicit syntax seems better, similar to PKs in the standard:\n\n > pg=# CREATE UNIQUE INDEX ON temporal_testing USING gist (id, valid_during WITHOUT OVERLAPS);\n > ERROR: access method \"gist\" does not support unique indexes\n\nWe could also support *non-temporal* unique GiST indexes, particularly now that we have the stratnum \nsupport function. Those would use the syntax you gave, omitting WITHOUT OVERLAPS. But that seems \nlike a separate effort to me.\n\n> Additionally, because I can't create my own non-constraint-backing\n> unique GIST indexes, I can't pre-create my unique constraints\n> CONCURRENTLY as one could do for the non-temporal case: UNIQUE\n> constraints hold ownership of the index and would drop the index if\n> the constraint is dropped, too, and don't support a CONCURRENTLY\n> modifier, nor an INVALID modifier. This means temporal unique\n> constraints have much less administrative wiggle room than normal\n> unique constraints, and I think that's not great.\n\nThis is a great use-case for why we should support this eventually, even if it uses non-standard syntax.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Sat, 11 May 2024 20:26:55 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Sun, 12 May 2024 at 05:26, Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n> On 5/9/24 17:44, Matthias van de Meent wrote:\n> > I haven't really been following this thread, but after playing around\n> > a bit with the feature I feel there are new gaps in error messages. I\n> > also think there are gaps in the functionality regarding the (lack of)\n> > support for CREATE UNIQUE INDEX, and attaching these indexes to\n> > constraints\n> Thank you for trying this out and sharing your thoughts! 
I think these are good points about CREATE\n> UNIQUE INDEX and then creating the constraint by handing it an existing index. This is something\n> that I am hoping to add, but it's not covered by the SQL:2011 standard, so I think it needs some\n> discussion, and I don't think it needs to go into v17.\n\nOkay.\n\n> For instance you are saying:\n>\n> > pg=# CREATE UNIQUE INDEX ON temporal_testing USING gist (id, valid_during);\n> > ERROR: access method \"gist\" does not support unique indexes\n>\n> To me that error message seems correct. The programmer hasn't said anything about the special\n> temporal behavior they are looking for.\n\nBut I showed that I had a GIST index that does have the indisunique\nflag set, which shows that GIST does support indexes with unique\nsemantics.\n\nThat I can't use CREATE UNIQUE INDEX to create such an index doesn't\nmean the feature doesn't exist, which is what the error message\nimplies.\n\n> To get non-overlapping semantics from an index, this more\n> explicit syntax seems better, similar to PKs in the standard:\n\nYes, agreed on that part.\n\n> > pg=# CREATE UNIQUE INDEX ON temporal_testing USING gist (id, valid_during WITHOUT OVERLAPS);\n> > ERROR: access method \"gist\" does not support unique indexes\n>\n> We could also support *non-temporal* unique GiST indexes, particularly now that we have the stratnum\n> support function. Those would use the syntax you gave, omitting WITHOUT OVERLAPS. 
But that seems\n> like a separate effort to me.\n\nNo objection on that.\n\nKind regards,\n\nMatthias van de Meent\n\n\n", "msg_date": "Sun, 12 May 2024 14:55:52 +0200", "msg_from": "Matthias van de Meent <boekewurm+postgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 5/12/24 05:55, Matthias van de Meent wrote:\n>> > pg=# CREATE UNIQUE INDEX ON temporal_testing USING gist (id, valid_during);\n>> > ERROR: access method \"gist\" does not support unique indexes\n>>\n>> To me that error message seems correct. The programmer hasn't said anything about the special\n>> temporal behavior they are looking for.\n> \n> But I showed that I had a GIST index that does have the indisunique\n> flag set, which shows that GIST does support indexes with unique\n> semantics.\n> \n> That I can't use CREATE UNIQUE INDEX to create such an index doesn't\n> mean the feature doesn't exist, which is what the error message\n> implies.\n\nTrue, the error message is not really telling the truth anymore. I do think most people who hit this \nerror are not thinking about temporal constraints at all though, and for non-temporal constraints it \nis still true. It's also true for CREATE INDEX, since WITHOUT OVERLAPS is only available on the \n*constraint*. So how about adding a hint, something like this?:\n\nERROR: access method \"gist\" does not support unique indexes\nHINT: To create a unique constraint with non-overlap behavior, use ADD CONSTRAINT ... 
WITHOUT OVERLAPS.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Sun, 12 May 2024 08:51:11 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 5/5/24 20:01, jian he wrote:\n> hi.\n> I hope I understand the problem correctly.\n> my understanding is that we are trying to solve a corner case:\n> create table t(a int4range, b int4range, primary key(a, b WITHOUT OVERLAPS));\n> insert into t values ('[1,2]','empty'), ('[1,2]','empty');\n> \n> \n> I think the entry point is ATAddCheckNNConstraint and index_create.\n> in a chain of DDL commands, you cannot be sure which one\n> (primary key constraint or check constraint) is being created first,\n> you just want to make sure that after both constraints are created,\n> then add a dependency between primary key and check constraint.\n> \n> so you need to validate at different functions\n> (ATAddCheckNNConstraint, index_create)\n> that these two constraints are indeed created,\n> only after that we have a dependency linking these two constraints.\n> \n> \n> I've attached a patch trying to solve this problem.\n> the patch is not totally polished, but works as expected, and also has\n> lots of comments.\n\nThanks for this! I've incorporated it into the CHECK constraint patch with some changes. In \nparticular I thought index_create was a strange place to change the conperiod value of a \npg_constraint record, and it is not actually needed if we are copying that value correctly.\n\nSome other comments on the patch file:\n\n > N.B. we also need to have special care for case\n > where check constraint was readded, e.g. 
ALTER TYPE.\n > if ALTER TYPE is altering the PERIOD column of the primary key,\n > alter column of primary key makes the index recreate, check constraint recreate,\n > however, former interally also including add a check constraint.\n > so we need to take care of merging two check constraint.\n\nThis is a good point. I've included tests for this based on your patch.\n\n > N.B. the check constraint name is hard-wired, so if you create the constraint\n > with the same name, PERIOD primary key cannot be created.\n\nYes, it may be worth doing something like other auto-named constraints and trying to avoid \nduplicates. I haven't taken that on yet; I'm curious what others have to say about it.\n\n > N.B. what about UNIQUE constraint?\n\nSee my previous posts on this thread about allowing 'empty' in UNIQUE constraints.\n\n > N.B. seems ok to not care about FOREIGN KEY regarding this corner case?\n\nAgreed.\n\nv3 patches attached, rebased to 3ca43dbbb6.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Sun, 12 May 2024 17:51:41 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 5/12/24 08:51, Paul Jungwirth wrote:\n> On 5/12/24 05:55, Matthias van de Meent wrote:\n>>>   > pg=# CREATE UNIQUE INDEX ON temporal_testing USING gist (id, valid_during);\n>>>   > ERROR:  access method \"gist\" does not support unique indexes\n>>>\n>>> To me that error message seems correct. 
The programmer hasn't said anything about the special\n>>> temporal behavior they are looking for.\n>>\n>> But I showed that I had a GIST index that does have the indisunique\n>> flag set, which shows that GIST does support indexes with unique\n>> semantics.\n>>\n>> That I can't use CREATE UNIQUE INDEX to create such an index doesn't\n>> mean the feature doesn't exist, which is what the error message\n>> implies.\n> \n> True, the error message is not really telling the truth anymore. I do think most people who hit this \n> error are not thinking about temporal constraints at all though, and for non-temporal constraints it \n> is still true. It's also true for CREATE INDEX, since WITHOUT OVERLAPS is only available on the \n> *constraint*. So how about adding a hint, something like this?:\n> \n> ERROR:  access method \"gist\" does not support unique indexes\n> HINT: To create a unique constraint with non-overlap behavior, use ADD CONSTRAINT ... WITHOUT OVERLAPS.\n\nI thought a little more about eventually implementing WITHOUT OVERLAPS support for CREATE INDEX, and \nhow it relates to this error message in particular. Even when that is done, it will still depend on \nthe stratnum support function for the keys' opclasses, so the GiST AM itself will still have false \namcanunique, I believe. Probably the existing error message is still the right one. The hint won't \nneed to mention ADD CONSTRAINT anymore. It should still point users to WITHOUT OVERLAPS, and \npossibly the stratnum support function too. I think what we are doing for v17 is all compatible with \nthat plan.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Sun, 12 May 2024 21:54:34 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 03.04.24 07:30, Paul Jungwirth wrote:\n> But is it *literally* unique? Well two identical keys, e.g. 
(5, \n> '[Jan24,Mar24)') and (5, '[Jan24,Mar24)'), do have overlapping ranges, \n> so the second is excluded. Normally a temporal unique index is *more* \n> restrictive than a standard one, since it forbids other values too (e.g. \n> (5, '[Jan24,Feb24)')). But sadly there is one exception: the ranges in \n> these keys do not overlap: (5, 'empty'), (5, 'empty'). With \n> ranges/multiranges, `'empty' && x` is false for all x. You can add that \n> key as many times as you like, despite a PK/UQ constraint:\n> \n>     postgres=# insert into t values\n>     ('[1,2)', 'empty', 'foo'),\n>     ('[1,2)', 'empty', 'bar');\n>     INSERT 0 2\n>     postgres=# select * from t;\n>       id   | valid_at | name\n>     -------+----------+------\n>      [1,2) | empty    | foo\n>      [1,2) | empty    | bar\n>     (2 rows)\n> \n> Cases like this shouldn't actually happen for temporal tables, since \n> empty is not a meaningful value. An UPDATE/DELETE FOR PORTION OF would \n> never cause an empty. But we should still make sure they don't cause \n> problems.\n\n> We should give temporal primary keys an internal CHECK constraint saying \n> `NOT isempty(valid_at)`. The problem is analogous to NULLs in parts of a \n> primary key. NULLs prevent two identical keys from ever comparing as \n> equal. And just as a regular primary key cannot contain NULLs, so a \n> temporal primary key should not contain empties.\n> \n> The standard effectively prevents this with PERIODs, because a PERIOD \n> adds a constraint saying start < end. But our ranges enforce only start \n> <= end. If you say `int4range(4,4)` you get `empty`. If we constrain \n> primary keys as I'm suggesting, then they are literally unique, and \n> indisunique seems safer.\n> \n> Should we add the same CHECK constraint to temporal UNIQUE indexes? I'm \n> inclined toward no, just as we don't forbid NULLs in parts of a UNIQUE \n> key. We should try to pick what gives users more options, when possible. 
\n> Even if it is questionably meaningful, I can see use cases for allowing \n> empty ranges in a temporal table. For example it lets you \"disable\" a \n> row, preserving its values but marking it as never true.\n\nIt looks like we missed some of these fundamental design questions early \non, and it might be too late now to fix them for PG17.\n\nFor example, the discussion on unique constraints misses that the \nquestion of null values in unique constraints itself is controversial \nand that there is now a way to change the behavior. So I imagine there \nis also a selection of possible behaviors you might want for empty \nranges. Intuitively, I don't think empty ranges are sensible for \ntemporal unique constraints. But anyway, it's a bit late now to be \ndiscussing this.\n\nI'm also concerned that if ranges have this fundamental incompatibility \nwith periods, then the plan to eventually evolve this patch set to \nsupport standard periods will also have as-yet-unknown problems.\n\nSome of these issues might be design flaws in the underlying mechanisms, \nlike range types and exclusion constraints. Like, if you're supposed to \nuse this for scheduling but you can use empty ranges to bypass exclusion \nconstraints, how is one supposed to use this? Yes, a check constraint \nusing isempty() might be the right answer. But I don't see this \ndocumented anywhere.\n\nOn the technical side, adding an implicit check constraint as part of a \nprimary key constraint is quite a difficult implementation task, as I \nthink you are discovering. I'm just reminded about how the patch for \ncatalogued not-null constraints struggled with linking these not-null \nconstraints to primary keys correctly. 
This sounds a bit similar.\n\nI'm afraid that these issues cannot be resolved in good time for this \nrelease, so we should revert this patch set for now.\n\n\n\n", "msg_date": "Mon, 13 May 2024 12:11:11 +0200", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 5/13/24 03:11, Peter Eisentraut wrote:\n> It looks like we missed some of these fundamental design questions early on, and it might be too \n> late now to fix them for PG17.\n> \n> For example, the discussion on unique constraints misses that the question of null values in unique \n> constraints itself is controversial and that there is now a way to change the behavior.  So I \n> imagine there is also a selection of possible behaviors you might want for empty ranges. \n> Intuitively, I don't think empty ranges are sensible for temporal unique constraints.  But anyway, \n> it's a bit late now to be discussing this.\n> \n> I'm also concerned that if ranges have this fundamental incompatibility with periods, then the plan \n> to eventually evolve this patch set to support standard periods will also have as-yet-unknown problems.\n> \n> Some of these issues might be design flaws in the underlying mechanisms, like range types and \n> exclusion constraints.  Like, if you're supposed to use this for scheduling but you can use empty \n> ranges to bypass exclusion constraints, how is one supposed to use this?  Yes, a check constraint \n> using isempty() might be the right answer.  But I don't see this documented anywhere.\n> \n> On the technical side, adding an implicit check constraint as part of a primary key constraint is \n> quite a difficult implementation task, as I think you are discovering.  I'm just reminded about how \n> the patch for catalogued not-null constraints struggled with linking these not-null constraints to \n> primary keys correctly.  
This sounds a bit similar.\n> \n> I'm afraid that these issues cannot be resolved in good time for this release, so we should revert \n> this patch set for now.\n\nI think reverting is a good idea. I'm not really happy with the CHECK constraint solution either. \nI'd be happy to have some more time to rework this for v18.\n\nA couple alternatives I'd like to explore:\n\n1. Domain constraints instead of a CHECK constraint. I think this is probably worse, and I don't \nplan to spend much time on it, but I thought I'd mention it in case someone else thought otherwise.\n\n2. A slightly different overlaps operator, say &&&, where 'empty' &&& 'empty' is true. But 'empty' \nwith anything else could still be false (or not). That operator would prevent duplicates in an \nexclusion constraint. This also means we could support more types than just ranges & multiranges. I \nneed to think about whether this combines badly with existing operators, but if not it has a lot of \npromise. If anything it might be *less* contradictory, because it fits better with 'empty' @> \n'empty', which we say is true.\n\nAnother thing a revert would give me some time to consider: even though it's not standard syntax, I \nwonder if we want to require syntax something like `PRIMARY KEY USING gist (id, valid_at WITHOUT \nOVERLAPS)`. Everywhere else we default to btree, so defaulting to gist feels a little weird. In \ntheory we could even someday support WITHOUT OVERLAPS with btree, if we taught that AM to answer \nthat question. 
(I admit there is probably not a lot of desire for that though.)\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Mon, 13 May 2024 16:30:37 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, May 14, 2024 at 7:30 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 5/13/24 03:11, Peter Eisentraut wrote:\n> > It looks like we missed some of these fundamental design questions early on, and it might be too\n> > late now to fix them for PG17.\n> >\n> > For example, the discussion on unique constraints misses that the question of null values in unique\n> > constraints itself is controversial and that there is now a way to change the behavior. So I\n> > imagine there is also a selection of possible behaviors you might want for empty ranges.\n> > Intuitively, I don't think empty ranges are sensible for temporal unique constraints. But anyway,\n> > it's a bit late now to be discussing this.\n> >\n> > I'm also concerned that if ranges have this fundamental incompatibility with periods, then the plan\n> > to eventually evolve this patch set to support standard periods will also have as-yet-unknown problems.\n> >\n> > Some of these issues might be design flaws in the underlying mechanisms, like range types and\n> > exclusion constraints. Like, if you're supposed to use this for scheduling but you can use empty\n> > ranges to bypass exclusion constraints, how is one supposed to use this? Yes, a check constraint\n> > using isempty() might be the right answer. But I don't see this documented anywhere.\n> >\n> > On the technical side, adding an implicit check constraint as part of a primary key constraint is\n> > quite a difficult implementation task, as I think you are discovering. 
I'm just reminded about how\n> > the patch for catalogued not-null constraints struggled with linking these not-null constraints to\n> > primary keys correctly. This sounds a bit similar.\n> >\n> > I'm afraid that these issues cannot be resolved in good time for this release, so we should revert\n> > this patch set for now.\n>\n> I think reverting is a good idea. I'm not really happy with the CHECK constraint solution either.\n> I'd be happy to have some more time to rework this for v18.\n>\n> A couple alternatives I'd like to explore:\n>\n> 1. Domain constraints instead of a CHECK constraint. I think this is probably worse, and I don't\n> plan to spend much time on it, but I thought I'd mention it in case someone else thought otherwise.\n>\n> 2. A slightly different overlaps operator, say &&&, where 'empty' &&& 'empty' is true. But 'empty'\n> with anything else could still be false (or not). That operator would prevent duplicates in an\n> exclusion constraint. This also means we could support more types than just ranges & multiranges. I\n> need to think about whether this combines badly with existing operators, but if not it has a lot of\n> promise. If anything it might be *less* contradictory, because it fits better with 'empty' @>\n> 'empty', which we say is true.\n>\nthanks for the idea, I roughly played around with it, seems doable.\nbut the timing seems not good, reverting is a good idea.\n\n\nI also checked the commit. 
6db4598fcb82a87a683c4572707e522504830a2b\n+\n+/*\n+ * Returns the btree number for equals, otherwise invalid.\n+ */\n+Datum\n+gist_stratnum_btree(PG_FUNCTION_ARGS)\n+{\n+ StrategyNumber strat = PG_GETARG_UINT16(0);\n+\n+ switch (strat)\n+ {\n+ case RTEqualStrategyNumber:\n+ PG_RETURN_UINT16(BTEqualStrategyNumber);\n+ case RTLessStrategyNumber:\n+ PG_RETURN_UINT16(BTLessStrategyNumber);\n+ case RTLessEqualStrategyNumber:\n+ PG_RETURN_UINT16(BTLessEqualStrategyNumber);\n+ case RTGreaterStrategyNumber:\n+ PG_RETURN_UINT16(BTGreaterStrategyNumber);\n+ case RTGreaterEqualStrategyNumber:\n+ PG_RETURN_UINT16(BTGreaterEqualStrategyNumber);\n+ default:\n+ PG_RETURN_UINT16(InvalidStrategy);\n+ }\n+}\nthe comments seem not right?\n\n\n", "msg_date": "Tue, 14 May 2024 13:33:46 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, May 14, 2024 at 01:33:46PM +0800, jian he wrote:\n> thanks for the idea, I roughly played around with it, seems doable.\n> but the timing seems not good, reverting is a good idea.\n\nPlease note that this is still an open item, and that time is running\nshort until beta1. A revert seems to be the consensus reached, so,\nPeter, are you planning to do so?\n--\nMichael", "msg_date": "Wed, 15 May 2024 15:13:23 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 15.05.24 08:13, Michael Paquier wrote:\n> On Tue, May 14, 2024 at 01:33:46PM +0800, jian he wrote:\n>> thanks for the idea, I roughly played around with it, seems doable.\n>> but the timing seems not good, reverting is a good idea.\n> \n> Please note that this is still an open item, and that time is running\n> short until beta1. 
A revert seems to be the consensus reached, so,\n> Peter, are you planning to do so?\n\nI'm on it.\n\nHere is the list of patches I have identified to revert:\n\ngit show --oneline --no-patch 144c2ce0cc7 c3db1f30cba 482e108cd38 \n34768ee3616 5577a71fb0c a88c800deb6 030e10ff1a3 86232a49a43 46a0cd4cefb \n6db4598fcb8\n\n144c2ce0cc7 Fix ON CONFLICT DO NOTHING/UPDATE for temporal indexes\nc3db1f30cba doc: clarify PERIOD and WITHOUT OVERLAPS in CREATE TABLE\n482e108cd38 Add test for REPLICA IDENTITY with a temporal key\n34768ee3616 Add temporal FOREIGN KEY contraints\n5577a71fb0c Use half-open interval notation in without_overlaps tests\na88c800deb6 Use daterange and YMD in without_overlaps tests instead of \ntsrange.\n030e10ff1a3 Rename pg_constraint.conwithoutoverlaps to conperiod\n86232a49a43 Fix comment on gist_stratnum_btree\n46a0cd4cefb Add temporal PRIMARY KEY and UNIQUE constraints\n6db4598fcb8 Add stratnum GiST support function\n\nAttached are the individual revert patches. I'm supplying these here \nmainly so that future efforts can use those instead of the original \npatches, since that would have to redo all the conflict resolution and \nalso miss various typo fixes etc. that were applied in the meantime. I \nwill commit this as one squashed patch.", "msg_date": "Wed, 15 May 2024 11:39:47 +0200", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 15.05.24 11:39, Peter Eisentraut wrote:\n> Attached are the individual revert patches.  I'm supplying these here \n> mainly so that future efforts can use those instead of the original \n> patches, since that would have to redo all the conflict resolution and \n> also miss various typo fixes etc. that were applied in the meantime.  
I \n> will commit this as one squashed patch.\n\nThis has been done.\n\n\n\n", "msg_date": "Thu, 16 May 2024 09:02:36 +0200", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Mon, 2024-05-13 at 12:11 +0200, Peter Eisentraut wrote:\n> Some of these issues might be design flaws in the underlying\n> mechanisms, \n> like range types and exclusion constraints.  Like, if you're supposed\n> to \n> use this for scheduling but you can use empty ranges to bypass\n> exclusion \n> constraints, how is one supposed to use this?\n\nAn empty range does not \"bypass\" the an exclusion constraint. The\nexclusion constraint has a documented meaning and it's enforced.\n\nOf course there are situations where an empty range doesn't make a lot\nof sense. For many domains zero doesn't make any sense, either.\nConsider receiving an email saying \"thank you for purchasing 0\nwidgets!\". Check constraints seem like a reasonable way to prevent\nthose kinds of problems.\n\nRegards,\n\tJeff Davis\n\n\n\n", "msg_date": "Thu, 16 May 2024 16:22:35 -0700", "msg_from": "Jeff Davis <pgsql@j-davis.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, May 16, 2024 at 7:22 PM Jeff Davis <pgsql@j-davis.com> wrote:\n> An empty range does not \"bypass\" the an exclusion constraint. The\n> exclusion constraint has a documented meaning and it's enforced.\n>\n> Of course there are situations where an empty range doesn't make a lot\n> of sense. For many domains zero doesn't make any sense, either.\n> Consider receiving an email saying \"thank you for purchasing 0\n> widgets!\". Check constraints seem like a reasonable way to prevent\n> those kinds of problems.\n\nI think that's true. 
Having infinitely many zero-length events\nscheduled at the same point in time isn't necessarily a problem: I can\nattend an infinite number of simultaneous meetings if I only need to\nattend them for exactly zero time.\n\nWhat I think is less clear is what that means for temporal primary\nkeys. As Paul pointed out upthread, in every other case, a temporal\nprimary key is at least as unique as a regular primary key, but in\nthis case, it isn't. And someone might reasonably think that a\ntemporal primary key should exclude empty ranges just as all primary\nkeys exclude nulls. Or they might think the opposite.\n\nAt least, so it seems to me.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 21 May 2024 13:57:27 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, 21 May 2024 at 13:57, Robert Haas <robertmhaas@gmail.com> wrote:\n\nWhat I think is less clear is what that means for temporal primary\n> keys. As Paul pointed out upthread, in every other case, a temporal\n> primary key is at least as unique as a regular primary key, but in\n> this case, it isn't. And someone might reasonably think that a\n> temporal primary key should exclude empty ranges just as all primary\n> keys exclude nulls. Or they might think the opposite.\n>\n\nFascinating. I think you're absolutely right that it's clear that two empty\nintervals don't conflict. If somebody wants to claim two intervals\nconflict, they need to point to at least one instant in time that is common\nbetween them.\n\nBut a major point of a primary key, it seems to me, is that it uniquely\nidentifies a row. If items are identified by a time range, non-overlapping\nor not, then the empty range can only identify one item (per value of\nwhatever other columns are in the primary key). 
I think for a unique key\nthe non-overlapping restriction has to be considered an additional\nrestriction on top of the usual uniqueness restriction.\n\nI suspect in many applications there will be a non-empty constraint; for\nexample, it seems quite reasonable to me for a meeting booking system to\nforbid empty meetings. But when they are allowed they should behave in the\nmathematically appropriate way.\n\nOn Tue, 21 May 2024 at 13:57, Robert Haas <robertmhaas@gmail.com> wrote:\nWhat I think is less clear is what that means for temporal primary\nkeys. As Paul pointed out upthread, in every other case, a temporal\nprimary key is at least as unique as a regular primary key, but in\nthis case, it isn't. And someone might reasonably think that a\ntemporal primary key should exclude empty ranges just as all primary\nkeys exclude nulls. Or they might think the opposite. Fascinating. I think you're absolutely right that it's clear that two empty intervals don't conflict. If somebody wants to claim two intervals conflict, they need to point to at least one instant in time that is common between them.But a major point of a primary key, it seems to me, is that it uniquely identifies a row. If items are identified by a time range, non-overlapping or not, then the empty range can only identify one item (per value of whatever other columns are in the primary key). I think for a unique key the non-overlapping restriction has to be considered an additional restriction on top of the usual uniqueness restriction.I suspect in many applications there will be a non-empty constraint; for example, it seems quite reasonable to me for a meeting booking system to forbid empty meetings. 
But when they are allowed they should behave in the mathematically appropriate way.", "msg_date": "Tue, 21 May 2024 14:27:29 -0400", "msg_from": "Isaac Morland <isaac.morland@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, 2024-05-21 at 13:57 -0400, Robert Haas wrote:\n> What I think is less clear is what that means for temporal primary\n> keys.\n\nRight.\n\nMy message was specifically a response to the concern that there was\nsome kind of design flaw in the range types or exclusion constraints\nmechanisms.\n\nI don't believe that empty ranges represent a design flaw. If they\ndon't make sense for temporal constraints, then temporal constraints\nshould forbid them.\n\nRegards,\n\tJeff Davis\n\n\n\n", "msg_date": "Tue, 21 May 2024 12:54:47 -0700", "msg_from": "Jeff Davis <pgsql@j-davis.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 5/21/24 11:27, Isaac Morland wrote:\n> On Tue, 21 May 2024 at 13:57, Robert Haas <robertmhaas@gmail.com <mailto:robertmhaas@gmail.com>> wrote:\n> \n> What I think is less clear is what that means for temporal primary\n> keys. As Paul pointed out upthread, in every other case, a temporal\n> primary key is at least as unique as a regular primary key, but in\n> this case, it isn't. And someone might reasonably think that a\n> temporal primary key should exclude empty ranges just as all primary\n> keys exclude nulls. Or they might think the opposite.\n> \n> \n> Fascinating. I think you're absolutely right that it's clear that two empty intervals don't \n> conflict. If somebody wants to claim two intervals conflict, they need to point to at least one \n> instant in time that is common between them.\n> \n> But a major point of a primary key, it seems to me, is that it uniquely identifies a row. 
If items \n> are identified by a time range, non-overlapping or not, then the empty range can only identify one \n> item (per value of whatever other columns are in the primary key). I think for a unique key the \n> non-overlapping restriction has to be considered an additional restriction on top of the usual \n> uniqueness restriction.\n> \n> I suspect in many applications there will be a non-empty constraint; for example, it seems quite \n> reasonable to me for a meeting booking system to forbid empty meetings. But when they are allowed \n> they should behave in the mathematically appropriate way.\n\nFinding a way forward for temporal PKs got a lot of discussion at pgconf.dev (thanks especially to \nPeter Eisentraut and Jeff Davis!), so I wanted to summarize some options and describe what I think \nis the best approach.\n\nFirst the problem: empty ranges! A temporal PK/UNIQUE constraint is basically an exclusion \nconstraint that is `(id WITH =, valid_at WITH &&)`. But the special 'empty' value never overlaps \nanything, *including itself*. (Note it has no \"position\": [3,3) is the same as [4,4).) Since the \nexclusion constraint forbids overlapping ranges, and empties never overlap, your table can have \nduplicates. (I'm talking about \"literal uniqueness\" as discussed in [1].) For instance:\n\n CREATE EXTENSION btree_gist;\n CREATE TABLE t (id int, valid_at daterange, name text);\n ALTER TABLE t ADD CONSTRAINT tpk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);\n INSERT INTO t VALUES (1, 'empty', 'foo');\n INSERT INTO t VALUES (1, 'empty', 'bar');\n\nMultiranges have the same problem. So what do we do about that?\n\n**Option 0**: Allow it but document it. It shouldn't happen in practice: there is no reason for an \nempty range to get into a temporal table, and it arguably doesn't mean anything. The record is true \nat no time? But of course it will happen anyway. It's a footgun and will break expectations for at \nleast some.\n\nIt causes problems for us too. 
If you say `SELECT name FROM t GROUP BY id, valid_at`, we recognize \nthat `name` is a functional dependency on the PK, so we allow it and give you the first row matching \neach key. You might get \"foo\" or you might get \"bar\". Also the planner uses not-nullable uniqueness \nto take many shortcuts. I couldn't create any concrete breakage there, but I bet someone else could. \nPKs that are not literally unique seems like something that would cause headaches for years.\n\n**Option 1**: Temporal PKs should automatically create a CHECK constraint that forbids empty ranges. \nShould UNIQUE constraints too? I'm tempted to say no, since sometimes users surprise us by coming up \nwith new ways to use things. For instance one way to use empty ranges is to reference a temporal \ntable from a non-temporal table, since `'empty' <@ anything` is always true (though this has \nquestionable meaning or practical use). But probably we should forbid empties for UNIQUE constraints \ntoo. Forbidding them is more aligned with the SQL standard, which says that when you have a PERIOD, \nstartcol < endcol (not <=). And it feels more consistent to treat both constraints the same way. \nFinally, if UNIQUEs do allow empties, we still risk confusing our planner.\n\nMy last patch created these CHECK constraints for PKs (but not UNIQUEs) as INTERNAL dependencies. \nIt's pretty clunky. There are lots of cases to handle, e.g. `ALTER COLUMN c TYPE` may reuse the PK \nindex or may generate a new one. And what if the user already created the same constraint? Seeing \nall the trouble giving PKs automatic (cataloged) NOT NULL constraints makes me wary about this \napproach. It's not as bad, since there is no legacy, but it's still more annoying than I expected.\n\nFinally, hanging the CHECK constraint off the PK sets us up for problems when we add true PERIODs. 
\nUnder 11.27 of SQL/Foundation, General Rules 2b says that defining a PERIOD should automatically add \na CHECK constraint that startcol < endcol. That is already part of my last patch in this series. But \nthat would be redundant with the constraint from the PK. And attaching the constraint to the PERIOD \nis a lot simpler than attaching it to the PK.\n\n**Option 2**: Add a new operator, called &&&, that works like && except an empty range *does* \noverlap another empty range. Empty ranges should still not overlap anything else. This would fix the \nexclusion constraint. You could add `(5, 'empty')` once but not twice. This would allow empties to \npeople who want to use them. (We would still forbid them if you define a PERIOD, because those come \nwith the CHECK constraint mentioned above.)\nAnd there is almost nothing to code. But it is mathematically suspect to say an empty range overlaps \nsomething small (something with zero width) but not something big. Surely if a && b and b <@ c, then \na && c? So this feels like the kind of elegant hack that you eventually regret.\n\n**Option 3**: Forbid empties, not as a reified CHECK constraint, but just with some code in the \nexecutor. Again we could do just PKs or PKs and UNIQUEs. Let's do both, for all the reasons above. \nNot creating a CHECK constraint is much less clunky. There is no catalog entry to create/drop. Users \ndon't wonder where it came from when they say `\\d t`. It can't conflict with constraints of their \nown. We would enforce this in ExecConstraints, where we enforce NOT NULL and CHECK constraints, for \nany table with constraints where conperiod is true. We'd also need to do this check on existing rows \nwhen you create a temporal PK/UQ. This option also requires a new field in pg_class: just as we have \nrelchecks, relhasrules, relhastriggers, etc. to let us skip work in the relcache, I assume we'd want \nrelperiods.\n\n**Option 4**: Teach GiST indexes to enforce uniqueness. 
We didn't discuss this at pgconf, at least \nnot in reference to the empties problem. But I was thinking about this request from Matthias for \ntemporal PKs & UQs to support `USING INDEX idx`.[2] It is confusing that a temporal index has \nindisunique, but if you try to create a unique GiST index directly we say they don't support UNIQUE \nindexes! Similarly `pg_indexam_has_property(783, 'can_unique')` returns false. There is something \nmuddled about all that. So how about we give the GiST AM handler amcanunique?\n\nAs I understand it, GiST indexes are capable of uniqueness,[3] and indeed today you can create an \nexclusion constraint with the same effect, but in the past the core had no way of asking an opclass \nwhich operator gave equality. With the stratnum support proc from 6db4598fcb (part of this patch \nseries, but reverted from v17), we could get a known operator for \"equals\". If the index's opclasses \nhad that sproc and it gave non-zero for RTEqualStrategyNumber, then CREATE UNIQUE INDEX would \nsucceed. We would just (\"just\") need to make GiST raise an error if it found a duplicate. And if \n*that* was happening, the empty ranges wouldn't cause a problem.\n\nI think Option 3 is good, but I like Option 4 a lot because (1) it doesn't assume ranges & \nmultiranges (2) it allows empties if users have some reason for them (3) since the real problem is \nduplicates, forbidding them is a more precise solution, (4) it clears up the confusing situation of \nGiST not being canunique, even though you can create an index with indisunique.\n\nOTOH it is probably more work, and it is slower than just forbidding duplicates. (The unique check \nrequires a separate index search, according to [3], as an exclusion constraint would do.) Also if we \ndo it to make GiST be canunique, that can happen separately from the temporal work.\n\nSo I'm proceeding with Option 3, which at worst can eventually become an optimization for Option 4. 
\nI don't think forbidding empty ranges is a great loss to be honest. But if anyone has any feedback, \nplease share: objections, alternatives, advice---all is welcome.\n\n[1] \nhttps://www.postgresql.org/message-id/47550967-260b-4180-9791-b224859fe63e%40illuminatedcomputing.com\n[2] \nhttps://www.postgresql.org/message-id/CAEze2Wh21V66udM8cbvBBsAgyQ_5x9nfR0d3sWzbmZk%2B%2Bey7xw%40mail.gmail.com\n[3] https://dsf.berkeley.edu/papers/sigmod97-gist.pdf\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Wed, 5 Jun 2024 13:56:15 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, May 9, 2024 at 5:44 PM Matthias van de Meent <boekewurm+postgres@gmail.com> wrote:\n > Additionally, because I can't create my own non-constraint-backing\n > unique GIST indexes, I can't pre-create my unique constraints\n > CONCURRENTLY as one could do for the non-temporal case\n\nWe talked about this a bit at pgconf.dev. I would like to implement it, since I agree it is an \nimportant workflow to support. Here are some thoughts about what would need to be done.\n\nFirst we could take a small step: allow non-temporal UNIQUE GiST indexes. This is possible according \nto [1], but in the past we had no way of knowing which strategy number an opclass was using for \nequality. With the stratnum support proc introduced by 6db4598fcb (reverted for v17), we could \nchange amcanunique to true for the GiST AM handler. If the index's opclasses had that sproc and it \ngave non-zero for RTEqualStrategyNumber, we would have a reliable \"definition of uniqueness\". 
UNIQUE \nGiST indexes would raise an error if they detected a duplicate record.\n\nIncidentally, this would also let us correct the error message about GiST not supporting unique, \nfixing the problem you raised here:\n\nOn Sun, May 12, 2024 at 8:51 AM Paul Jungwirth <pj@illuminatedcomputing.com> wrote:\n >\n > On 5/12/24 05:55, Matthias van de Meent wrote:\n > >> > pg=# CREATE UNIQUE INDEX ON temporal_testing USING gist (id, valid_during);\n > >> > ERROR: access method \"gist\" does not support unique indexes\n > >>\n > >> To me that error message seems correct. The programmer hasn't said anything about the special\n > >> temporal behavior they are looking for.\n > >\n > > But I showed that I had a GIST index that does have the indisunique\n > > flag set, which shows that GIST does support indexes with unique\n > > semantics.\n > >\n > > That I can't use CREATE UNIQUE INDEX to create such an index doesn't\n > > mean the feature doesn't exist, which is what the error message\n > > implies.\n >\n > True, the error message is not really telling the truth anymore.\n\nBut that is just regular non-temporal indexes. To avoid a long table lock you'd need a way to build \nthe index that is not just unique, but also does exclusion based on &&. We could borrow syntax from \nSQL:2011 and allow `CREATE INDEX idx ON t (id, valid_at WITHOUT OVERLAPS)`. But since CREATE INDEX \nis a lower-level concept than a constraint, it'd be better to do something more general. You can \nalready give opclasses for each indexed column. How about allowing operators as well? For instance \n`CREATE UNIQUE INDEX idx ON t (id WITH =, valid_at WITH &&)`? Then the index would know to enforce \nthose rules. This is the same data we store today in pg_constraint.conexclops. So that would get \nmoved/copied to pg_index (probably moved).\n\nThen when you add the constraint, what is the syntax? Today when you say PRIMARY KEY/UNIQUE USING \nINDEX, you don't give the column names. 
So how do we know it's WITHOUT OVERLAPS? I guess if the \nunderlying index has (foo WITH = [, bar WITH =], baz WITH &&) we just assume the user wants WITHOUT \nOVERLAPS, and otherwise they want a regular PK/UQ constraint?\n\nIn addition this workflow only works if you can CREATE INDEX CONCURRENTLY. I'm not sure yet if we'll \nhave problems there. I noticed that for REINDEX at least, there were plans in 2012 to support \nexclusion-constraint indexes,[2] but when the patch was committed in 2019 they had been dropped, \nwith plans to add support eventually.[3] Today they are still not supported. Maybe whatever caused \nproblems for REINDEX isn't an issue for just INDEX, but it would take more research to find out.\n\n[1] https://dsf.berkeley.edu/papers/sigmod97-gist.pdf\n[2] Original patch thread from 2012: \nhttps://www.postgresql.org/message-id/flat/CAB7nPqS%2BWYN021oQHd9GPe_5dSVcVXMvEBW_E2AV9OOEwggMHw%40mail.gmail.com#e1a372074cfdf37bf9e5b4e29ddf7b2d\n[3] Revised patch thread, committed in 2019: \nhttps://www.postgresql.org/message-id/flat/60052986-956b-4478-45ed-8bd119e9b9cf%402ndquadrant.com#74948a1044c56c5e817a5050f554ddee\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Wed, 5 Jun 2024 13:57:40 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, Jun 5, 2024 at 4:56 PM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n> **Option 2**: Add a new operator, called &&&, that works like && except an empty range *does*\n> overlap another empty range. Empty ranges should still not overlap anything else. This would fix the\n> exclusion constraint. You could add `(5, 'empty')` once but not twice. This would allow empties to\n> people who want to use them. (We would still forbid them if you define a PERIOD, because those come\n> with the CHECK constraint mentioned above.)\n> And there is almost nothing to code. 
But it is mathematically suspect to say an empty range overlaps\n> something small (something with zero width) but not something big. Surely if a && b and b <@ c, then\n> a && c? So this feels like the kind of elegant hack that you eventually regret.\n\nI think this might be fine.\n\n> **Option 3**: Forbid empties, not as a reified CHECK constraint, but just with some code in the\n> executor. Again we could do just PKs or PKs and UNIQUEs. Let's do both, for all the reasons above.\n> Not creating a CHECK constraint is much less clunky. There is no catalog entry to create/drop. Users\n> don't wonder where it came from when they say `\\d t`. It can't conflict with constraints of their\n> own. We would enforce this in ExecConstraints, where we enforce NOT NULL and CHECK constraints, for\n> any table with constraints where conperiod is true. We'd also need to do this check on existing rows\n> when you create a temporal PK/UQ. This option also requires a new field in pg_class: just as we have\n> relchecks, relhasrules, relhastriggers, etc. to let us skip work in the relcache, I assume we'd want\n> relperiods.\n\nI don't really like the existing relhasWHATEVER fields and am not very\nkeen about adding more of them. Maybe it will turn out to be the best\nway, but finding the right times to set and unset such fields has been\nchallenging over the years, and we've had to fix some bugs. So, if you\ngo this route, I recommend looking carefully at whether there's a\nreasonable way to avoid the need for such a field. Other than that,\nthis idea seems reasonable.\n\n> **Option 4**: Teach GiST indexes to enforce uniqueness. We didn't discuss this at pgconf, at least\n> not in reference to the empties problem. But I was thinking about this request from Matthias for\n> temporal PKs & UQs to support `USING INDEX idx`.[2] It is confusing that a temporal index has\n> indisunique, but if you try to create a unique GiST index directly we say they don't support UNIQUE\n> indexes! 
Similarly `pg_indexam_has_property(783, 'can_unique')` returns false. There is something\n> muddled about all that. So how about we give the GiST AM handler amcanunique?\n>\n> As I understand it, GiST indexes are capable of uniqueness,[3] and indeed today you can create an\n> exclusion constraint with the same effect, but in the past the core had no way of asking an opclass\n> which operator gave equality. With the stratnum support proc from 6db4598fcb (part of this patch\n> series, but reverted from v17), we could get a known operator for \"equals\". If the index's opclasses\n> had that sproc and it gave non-zero for RTEqualStrategyNumber, then CREATE UNIQUE INDEX would\n> succeed. We would just (\"just\") need to make GiST raise an error if it found a duplicate. And if\n> *that* was happening, the empty ranges wouldn't cause a problem.\n\nIsn't this just a more hacky version of option (2)?\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 12 Jun 2024 10:31:49 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, 5 Jun 2024 at 22:57, Paul Jungwirth <pj@illuminatedcomputing.com> wrote:\n>\n> On Thu, May 9, 2024 at 5:44 PM Matthias van de Meent <boekewurm+postgres@gmail.com> wrote:\n> > Additionally, because I can't create my own non-constraint-backing\n> > unique GIST indexes, I can't pre-create my unique constraints\n> > CONCURRENTLY as one could do for the non-temporal case\n>\n> We talked about this a bit at pgconf.dev. I would like to implement it, since I agree it is an\n> important workflow to support. Here are some thoughts about what would need to be done.\n>\n> First we could take a small step: allow non-temporal UNIQUE GiST indexes. This is possible according\n> to [1], but in the past we had no way of knowing which strategy number an opclass was using for\n> equality. 
With the stratnum support proc introduced by 6db4598fcb (reverted for v17), we could\n> change amcanunique to true for the GiST AM handler. If the index's opclasses had that sproc and it\n> gave non-zero for RTEqualStrategyNumber, we would have a reliable \"definition of uniqueness\". UNIQUE\n> GiST indexes would raise an error if they detected a duplicate record.\n\nCool.\n\n> But that is just regular non-temporal indexes. To avoid a long table lock you'd need a way to build\n> the index that is not just unique, but also does exclusion based on &&. We could borrow syntax from\n> SQL:2011 and allow `CREATE INDEX idx ON t (id, valid_at WITHOUT OVERLAPS)`. But since CREATE INDEX\n> is a lower-level concept than a constraint, it'd be better to do something more general. You can\n> already give opclasses for each indexed column. How about allowing operators as well? For instance\n> `CREATE UNIQUE INDEX idx ON t (id WITH =, valid_at WITH &&)`? Then the index would know to enforce\n> those rules.\n\nI think this looks fine. I'd like it even better if we could default\nto the equality operator that's used by the type's default btree\nopclass in this syntax; that'd make CREATE UNIQUE INDEX much less\nawkward for e.g. hash indexes.\n\n> This is the same data we store today in pg_constraint.conexclops. So that would get\n> moved/copied to pg_index (probably moved).\n\nI'd keep the pg_constraint.conexclops around: People are inevitably\ngoing to want to keep the current exclusion constraints' handling of\nduplicate empty ranges, which is different from expectations we see\nfor UNIQUE INDEX's handling.\n\n> Then when you add the constraint, what is the syntax? Today when you say PRIMARY KEY/UNIQUE USING\n> INDEX, you don't give the column names. So how do we know it's WITHOUT OVERLAPS? 
I guess if the\n> underlying index has (foo WITH = [, bar WITH =], baz WITH &&) we just assume the user wants WITHOUT\n> OVERLAPS, and otherwise they want a regular PK/UQ constraint?\n\nPresumably you would know this based on the pg_index.indisunique flag?\n\n> In addition this workflow only works if you can CREATE INDEX CONCURRENTLY. I'm not sure yet if we'll\n> have problems there. I noticed that for REINDEX at least, there were plans in 2012 to support\n> exclusion-constraint indexes,[2] but when the patch was committed in 2019 they had been dropped,\n> with plans to add support eventually.[3] Today they are still not supported. Maybe whatever caused\n> problems for REINDEX isn't an issue for just INDEX, but it would take more research to find out.\n\nI don't quite see where exclusion constraints get into the picture?\nIsn't this about unique indexes, not exclusion constraints? I\nunderstand exclusion constraints are backed by indexes, but that\ndoesn't have to make it a unique index, right? I mean, currently, you\ncan write an exclusion constraint that makes sure that all rows with a\ncertain prefix have the same suffix columns (given a btree-esque index\ntype with <> -operator support), which seems exactly opposite of what\nunique indexes should do.\n\nKind regards,\n\nMatthias van de Meent\nNeon (https://neon.tech)\n\n\n", "msg_date": "Wed, 12 Jun 2024 17:48:59 +0200", "msg_from": "Matthias van de Meent <boekewurm+postgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 6/12/24 07:31, Robert Haas wrote:\n> On Wed, Jun 5, 2024 at 4:56 PM Paul Jungwirth\n>> **Option 3**: Forbid empties, not as a reified CHECK constraint, but just with some code in the\n>> executor. Again we could do just PKs or PKs and UNIQUEs. Let's do both, for all the reasons above.\n>> Not creating a CHECK constraint is much less clunky. There is no catalog entry to create/drop. 
Users\n>> don't wonder where it came from when they say `\\d t`. It can't conflict with constraints of their\n>> own. We would enforce this in ExecConstraints, where we enforce NOT NULL and CHECK constraints, for\n>> any table with constraints where conperiod is true. We'd also need to do this check on existing rows\n>> when you create a temporal PK/UQ. This option also requires a new field in pg_class: just as we have\n>> relchecks, relhasrules, relhastriggers, etc. to let us skip work in the relcache, I assume we'd want\n>> relperiods.\n> \n> I don't really like the existing relhasWHATEVER fields and am not very\n> keen about adding more of them. Maybe it will turn out to be the best\n> way, but finding the right times to set and unset such fields has been\n> challenging over the years, and we've had to fix some bugs. So, if you\n> go this route, I recommend looking carefully at whether there's a\n> reasonable way to avoid the need for such a field. Other than that,\n> this idea seems reasonable.\n\nHere is a reworked patch series following Option 3: rather than using a cataloged CHECK constraint, \nwe just do the check in the executor (but in the same place we do CHECK constraints). We also make \nsure existing rows are empty-free when you add the index.\n\nI took the reverted commits from v17, squashed the minor fixes, rebased everything, and added a new \npatch to forbid empty ranges/multiranges wherever there is a WITHOUT OVERLAPS constraint. It comes \nright after the PK patch in the series. I don't intend it to be committed separately, but I thought \nit would make review easier, since the other code has been reviewed a lot already.\n\nI did add a relperiods column, but I have a mostly-complete branch here (not included in the \npatches) that does without. Not maintaining that new column is simpler for sure. The consequence is \nthat the relcache must scan for WITHOUT OVERLAPS constraints on every table. 
That seems like a high \nperformance cost for a feature most databases won't use. Since we try hard to avoid that kind of \nthing (e.g. [1]), I thought adding relperiods would be preferred. If that's the wrong tradeoff I can \nchange it.\n\nOne idea I considered was to include WITHOUT OVERLAPS constraints in the relchecks count. But that \nfeels pretty hacky, and it is harder than it sounds, since index constraints are handled pretty far \nfrom where we update relchecks now. It doesn't save any complexity (but rather makes it worse), so \nthe only reason to do it would be to avoid expanding pg_class records.\n\nThese patches still add some if-clauses to psql and pg_dump that say `if (fout->remoteVersion >= \n170000)`. But if I change them to 180000 I get failures in e.g. the pg_dump tests. What do other \npeople do here before a release is cut?\n\nRebased on 3e53492aa7.\n\n[1] \nhttps://github.com/postgres/postgres/blob/5d6c64d290978dab76c00460ba809156874be035/src/backend/utils/cache/relcache.c#L688-L713\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Thu, 27 Jun 2024 14:56:15 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, Jun 27, 2024 at 5:56 PM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n> I did add a relperiods column, but I have a mostly-complete branch here (not included in the\n> patches) that does without. Not maintaining that new column is simpler for sure. The consequence is\n> that the relcache must scan for WITHOUT OVERLAPS constraints on every table. That seems like a high\n> performance cost for a feature most databases won't use. Since we try hard to avoid that kind of\n> thing (e.g. [1]), I thought adding relperiods would be preferred. If that's the wrong tradeoff I can\n> change it.\n\nI'm sure that you are right that nobody is going to like an extra\nindex scan just to find periods. 
So, suppose we do as you propose and\nadd relperiods. In the situation where we are adding the first period\n(or whatever the right term is) to the table, what kind of lock are we\nholding on the table? Conversely, when we drop the last period, what\nkind of lock are we holding on the table? If, hypothetically, both\nanswers were AccessExclusiveLock, this might not be too bad, but if\nyou say \"ShareLock\" then we've got a lot of problems; that's not even\nself-exclusive.\n\n> These patches still add some if-clauses to psql and pg_dump that say `if (fout->remoteVersion >=\n> 170000)`. But if I change them to 180000 I get failures in e.g. the pg_dump tests. What do other\n> people do here before a release is cut?\n\nSometimes I make a commit that bumps the version number (update major\nversion in src/tools/version_stamp.pl, then run it, then run autoconf,\nthen commit). Then I build my patch set on top of that. Once the\nactual major release bump happens, I just drop that commit from the\nstack.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 28 Jun 2024 08:18:07 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Here is v35 of this patch series, with a few small changes. I renamed relperiods to \nrelwithoutoverlaps, since that is more accurate about what we're counting. (PERIODs come in a later \npatch and we don't need to count them.) Also I cleaned up the branches in psql/pg_dump on version \nnow that we're officially on v18.\n\nOn 6/28/24 05:18, Robert Haas wrote:\n> On Thu, Jun 27, 2024 at 5:56 PM Paul Jungwirth\n> <pj@illuminatedcomputing.com> wrote:\n>> I did add a relperiods column, but I have a mostly-complete branch here (not included in the\n>> patches) that does without. Not maintaining that new column is simpler for sure. The consequence is\n>> that the relcache must scan for WITHOUT OVERLAPS constraints on every table. 
That seems like a high\n>> performance cost for a feature most databases won't use. Since we try hard to avoid that kind of\n>> thing (e.g. [1]), I thought adding relperiods would be preferred. If that's the wrong tradeoff I can\n>> change it.\n> \n> I'm sure that you are right that nobody is going to like an extra\n> index scan just to find periods. So, suppose we do as you propose and\n> add relperiods. In the situation where we are adding the first period\n> (or whatever the right term is) to the table, what kind of lock are we\n> holding on the table? Conversely, when we drop the last period, what\n> kind of lock are we holding on the table? If, hypothetically, both\n> answers were AccessExclusiveLock, this might not be too bad, but if\n> you say \"ShareLock\" then we've got a lot of problems; that's not even\n> self-exclusive.\n\nThis happens when creating a PRIMARY KEY or UNIQUE constraint, so we already have an \nAccessExclusiveLock on the table (whether creating or dropping). If we ever supported CREATE INDEX \nCONCURRENTLY for this, we would need to be careful about where we update the new field, but today we \ndon't support that for exclusion constraints.\n\nRebased to 4b211003ec.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Fri, 5 Jul 2024 12:22:31 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, Jun 6, 2024 at 4:56 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 5/21/24 11:27, Isaac Morland wrote:\n> > On Tue, 21 May 2024 at 13:57, Robert Haas <robertmhaas@gmail.com <mailto:robertmhaas@gmail.com>> wrote:\n> >\n> > What I think is less clear is what that means for temporal primary\n> > keys. As Paul pointed out upthread, in every other case, a temporal\n> > primary key is at least as unique as a regular primary key, but in\n> > this case, it isn't. 
And someone might reasonably think that a\n> > temporal primary key should exclude empty ranges just as all primary\n> > keys exclude nulls. Or they might think the opposite.\n> >\n> >\n> > Fascinating. I think you're absolutely right that it's clear that two empty intervals don't\n> > conflict. If somebody wants to claim two intervals conflict, they need to point to at least one\n> > instant in time that is common between them.\n> >\n> > But a major point of a primary key, it seems to me, is that it uniquely identifies a row. If items\n> > are identified by a time range, non-overlapping or not, then the empty range can only identify one\n> > item (per value of whatever other columns are in the primary key). I think for a unique key the\n> > non-overlapping restriction has to be considered an additional restriction on top of the usual\n> > uniqueness restriction.\n> >\n> > I suspect in many applications there will be a non-empty constraint; for example, it seems quite\n> > reasonable to me for a meeting booking system to forbid empty meetings. But when they are allowed\n> > they should behave in the mathematically appropriate way.\n>\n> Finding a way forward for temporal PKs got a lot of discussion at pgconf.dev (thanks especially to\n> Peter Eisentraut and Jeff Davis!), so I wanted to summarize some options and describe what I think\n> is the best approach.\n>\n> First the problem: empty ranges! A temporal PK/UNIQUE constraint is basically an exclusion\n> constraint that is `(id WITH =, valid_at WITH &&)`. But the special 'empty' value never overlaps\n> anything, *including itself*. (Note it has no \"position\": [3,3) is the same as [4,4).) Since the\n> exclusion constraint forbids overlapping ranges, and empties never overlap, your table can have\n> duplicates. (I'm talking about \"literal uniqueness\" as discussed in [1].) 
For instance:\n>\n> CREATE EXTENSION btree_gist;\n> CREATE TABLE t (id int, valid_at daterange, name text);\n> ALTER TABLE t ADD CONSTRAINT tpk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS);\n> INSERT INTO t VALUES (1, 'empty', 'foo');\n> INSERT INTO t VALUES (1, 'empty', 'bar');\n>\n> Multiranges have the same problem. So what do we do about that?\n>\n> **Option 0**: Allow it but document it. It shouldn't happen in practice: there is no reason for an\n> empty range to get into a temporal table, and it arguably doesn't mean anything. The record is true\n> at no time? But of course it will happen anyway. It's a footgun and will break expectations for at\n> least some.\n>\n> It causes problems for us too. If you say `SELECT name FROM t GROUP BY id, valid_at`, we recognize\n> that `name` is a functional dependency on the PK, so we allow it and give you the first row matching\n> each key. You might get \"foo\" or you might get \"bar\". Also the planner uses not-nullable uniqueness\n> to take many shortcuts. I couldn't create any concrete breakage there, but I bet someone else could.\n> PKs that are not literally unique seems like something that would cause headaches for years.\n>\n> **Option 1**: Temporal PKs should automatically create a CHECK constraint that forbids empty ranges.\n> Should UNIQUE constraints too? I'm tempted to say no, since sometimes users surprise us by coming up\n> with new ways to use things. For instance one way to use empty ranges is to reference a temporal\n> table from a non-temporal table, since `'empty' <@ anything` is always true (though this has\n> questionable meaning or practical use). But probably we should forbid empties for UNIQUE constraints\n> too. Forbidding them is more aligned with the SQL standard, which says that when you have a PERIOD,\n> startcol < endcol (not <=). 
And it feels more consistent to treat both constraints the same way.\n> Finally, if UNIQUEs do allow empties, we still risk confusing our planner.\n>\n> My last patch created these CHECK constraints for PKs (but not UNIQUEs) as INTERNAL dependencies.\n> It's pretty clunky. There are lots of cases to handle, e.g. `ALTER COLUMN c TYPE` may reuse the PK\n> index or may generate a new one. And what if the user already created the same constraint? Seeing\n> all the trouble giving PKs automatic (cataloged) NOT NULL constraints makes me wary about this\n> approach. It's not as bad, since there is no legacy, but it's still more annoying than I expected.\n>\n> Finally, hanging the CHECK constraint off the PK sets us up for problems when we add true PERIODs.\n> Under 11.27 of SQL/Foundation, General Rules 2b says that defining a PERIOD should automatically add\n> a CHECK constraint that startcol < endcol. That is already part of my last patch in this series. But\n> that would be redundant with the constraint from the PK. And attaching the constraint to the PERIOD\n> is a lot simpler than attaching it to the PK.\n>\n> **Option 2**: Add a new operator, called &&&, that works like && except an empty range *does*\n> overlap another empty range. Empty ranges should still not overlap anything else. This would fix the\n> exclusion constraint. You could add `(5, 'empty')` once but not twice. This would allow empties to\n> people who want to use them. (We would still forbid them if you define a PERIOD, because those come\n> with the CHECK constraint mentioned above.)\n> And there is almost nothing to code. But it is mathematically suspect to say an empty range overlaps\n> something small (something with zero width) but not something big. Surely if a && b and b <@ c, then\n> a && c? So this feels like the kind of elegant hack that you eventually regret.\n>\n> **Option 3**: Forbid empties, not as a reified CHECK constraint, but just with some code in the\n> executor. 
Again we could do just PKs or PKs and UNIQUEs. Let's do both, for all the reasons above.\n> Not creating a CHECK constraint is much less clunky. There is no catalog entry to create/drop. Users\n> don't wonder where it came from when they say `\\d t`. It can't conflict with constraints of their\n> own. We would enforce this in ExecConstraints, where we enforce NOT NULL and CHECK constraints, for\n> any table with constraints where conperiod is true. We'd also need to do this check on existing rows\n> when you create a temporal PK/UQ. This option also requires a new field in pg_class: just as we have\n> relchecks, relhasrules, relhastriggers, etc. to let us skip work in the relcache, I assume we'd want\n> relperiods.\n>\n> **Option 4**: Teach GiST indexes to enforce uniqueness. We didn't discuss this at pgconf, at least\n> not in reference to the empties problem. But I was thinking about this request from Matthias for\n> temporal PKs & UQs to support `USING INDEX idx`.[2] It is confusing that a temporal index has\n> indisunique, but if you try to create a unique GiST index directly we say they don't support UNIQUE\n> indexes! Similarly `pg_indexam_has_property(783, 'can_unique')` returns false. There is something\n> muddled about all that. 
So how about we give the GiST AM handler amcanunique?\n>\nI think we can Forbid empties,not not mess with pg_class.\n\n\nto make the communication smooth, i've set the base commit to\n46a0cd4cefb4d9b462d8cc4df5e7ecdd190bea92\n{Add temporal PRIMARY KEY and UNIQUE constraints}\nhttps://git.postgresql.org/cgit/postgresql.git/commit/?id=46a0cd4cefb4d9b462d8cc4df5e7ecdd190bea92\nyou can git reset --hard 46a0cd4cefb4d9b462d8cc4df5e7ecdd190bea92\nthen apply the attached patch.\n\n\n\nI hope I understand it correctly.\npreviously revert is only because the special value: empty.\ni tried to use the operator &&&, new gist strategy number, pg_amop\nentry to solve the problem.\nNow with the applied patch, if the range column is specified WITHOUT OVERLAPS,\nthen this column is not allowed to have any empty range value.\n\n\n\nlogic work through:\n* duplicate logic of range_overlaps but disallow empty value. also\nhave the operator &&&, (almost equivalent to &&)\n* add new gist strategy number\n* thanks to add stratnum GiST support function\n(https://git.postgresql.org/cgit/postgresql.git/commit/?id=6db4598fcb82a87a683c4572707e522504830a2b)\nnow we can set the strategy number to the mapped new function\n(equivalent to range_overlaps, but error out empty value)\n* in ComputeIndexAttrs, set the strategy number to the newly created\nStrategyNumber in \"else if (iswithoutoverlaps)\" block.\n* Similarly refactor src/backend/utils/adt/rangetypes_gist.c make the\nindex value validation using newly created function.\n\n\n\nfunction name, error message maybe not great now, but it works.\n------full demo, also see the comments.\nDROP TABLE if exists temporal_rng;\nCREATE TABLE temporal_rng (id int4range, valid_at tsrange);\nALTER TABLE temporal_rng\nADD CONSTRAINT temporal_rng_pk\nPRIMARY KEY (id, valid_at WITHOUT OVERLAPS);\n--should be fine.\nINSERT INTO temporal_rng VALUES ('empty', '[2022-01-01,2022-01-02]');\n--will error out, period column, empty range not allowed\nINSERT INTO 
temporal_rng VALUES ('[3,3]', 'empty');\n\nALTER TABLE temporal_rng DROP CONSTRAINT temporal_rng_pk;\n--period constraint dropped, now should be fine.\nINSERT INTO temporal_rng VALUES ('[3,3]', 'empty');\n\n--reinstall constraint, should error out\n--because existing one row has empty value.\nALTER TABLE temporal_rng\nADD CONSTRAINT temporal_rng_pk\nPRIMARY KEY (id, valid_at WITHOUT OVERLAPS);\ndelete from temporal_rng where id = '[3,3]';\n\n--reinstall constraint, should be fine, because empty value removed.\nALTER TABLE temporal_rng\nADD CONSTRAINT temporal_rng_pk\nPRIMARY KEY (id, valid_at WITHOUT OVERLAPS);", "msg_date": "Tue, 9 Jul 2024 15:15:39 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 7/9/24 00:15, jian he wrote:\n>> **Option 2**: Add a new operator, called &&&, that works like && except an empty range *does*\n>> overlap another empty range. Empty ranges should still not overlap anything else. This would fix the\n>> exclusion constraint. You could add `(5, 'empty')` once but not twice. This would allow empties to\n>> people who want to use them. (We would still forbid them if you define a PERIOD, because those come\n>> with the CHECK constraint mentioned above.)\n>> And there is almost nothing to code. But it is mathematically suspect to say an empty range overlaps\n>> something small (something with zero width) but not something big. Surely if a && b and b <@ c, then\n>> a && c? 
So this feels like the kind of elegant hack that you eventually regret.\n> I think we can Forbid empties,not not mess with pg_class.\n> \n> to make the communication smooth, i've set the base commit to\n> 46a0cd4cefb4d9b462d8cc4df5e7ecdd190bea92\n> {Add temporal PRIMARY KEY and UNIQUE constraints}\n> https://git.postgresql.org/cgit/postgresql.git/commit/?id=46a0cd4cefb4d9b462d8cc4df5e7ecdd190bea92\n> you can git reset --hard 46a0cd4cefb4d9b462d8cc4df5e7ecdd190bea92\n> then apply the attached patch.\n> \n> I hope I understand it correctly.\n> previously revert is only because the special value: empty.\n> i tried to use the operator &&&, new gist strategy number, pg_amop\n> entry to solve the problem.\n> Now with the applied patch, if the range column is specified WITHOUT OVERLAPS,\n> then this column is not allowed to have any empty range value.\n> \n> logic work through:\n> * duplicate logic of range_overlaps but disallow empty value. also\n> have the operator &&&, (almost equivalent to &&)\n> * add new gist strategy number\n> * thanks to add stratnum GiST support function\n> (https://git.postgresql.org/cgit/postgresql.git/commit/?id=6db4598fcb82a87a683c4572707e522504830a2b)\n> now we can set the strategy number to the mapped new function\n> (equivalent to range_overlaps, but error out empty value)\n> * in ComputeIndexAttrs, set the strategy number to the newly created\n> StrategyNumber in \"else if (iswithoutoverlaps)\" block.\n> * Similarly refactor src/backend/utils/adt/rangetypes_gist.c make the\n> index value validation using newly created function.\n\nI like this approach a lot, but I'd like to hear what some other people think?\n\nJian he's &&& operator is similar to what I proposed upthread, but when either operand is an empty \nvalue it simply raises an error. 
(It should be an ereport, not an elog, and I think \nmultirange_overlaps_multirange_internal is missing the empty check, but I can clean things up when I \nintegrate it into the patch series.)\n\nThis is much simpler than everything I'm doing: checking for empties in the executor phase, adding a \nfield to pg_class, setting things in the relcache, and checking for empties in existing rows when \nyou add an index. This patch uses existing infrastructure to do all the work. It seems like a much \ncleaner solution.\n\nUnlike my proposed &&& operator, it doesn't have weird mathematical consequences.\n\nAt first I thought raising an error was not great, but it's the same thing you get when you divide \nby zero. It's fine for an operator to have a restricted domain of inputs. And we would only use this \ninternally for primary keys and unique constraints, where indeed raising an error is just what we want.\n\nIf I don't hear objections (or think of something myself :-), I'm inclined to use this approach.\n\nBut what do people think?\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Wed, 17 Jul 2024 20:34:42 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 7/17/24 20:34, Paul Jungwirth wrote:\n> I like this approach a lot, but I'd like to hear what some other people think?\n> \n> Jian he's &&& operator is similar to what I proposed upthread, but when either operand is an empty \n> value it simply raises an error. (It should be an ereport, not an elog, and I think \n> multirange_overlaps_multirange_internal is missing the empty check, but I can clean things up when I \n> integrate it into the patch series.)\n\nI thought of a possible problem: this operator works great if there are already rows in the table, \nbut what if the *first row you insert* has an empty range? 
Then there is nothing to compare against, \nso the operator will never be used. Right?\n\nExcept when I test it, it still works! After running `make installcheck`, I did this:\n\nregression=# truncate temporal_rng cascade;\nNOTICE: truncate cascades to table \"temporal_fk_rng2rng\"\nTRUNCATE TABLE\nregression=# insert into temporal_rng values ('[1,2)', 'empty');\nERROR: range cannot be empty\n\nMy mental model must be wrong. Can anyone explain what is happening there? Is it something we can \ndepend on?\n\nSo I swapped in the &&& patch, cleaned it up, and added tests. But something is wrong. After I get \none failure from an empty, I keep getting failures, even though the table is empty:\n\nregression=# truncate temporal_rng cascade;\nNOTICE: truncate cascades to table \"temporal_fk_rng2rng\"\nTRUNCATE TABLE\nregression=# insert into temporal_rng values ('[1,2)', '[2000-01-01,2010-01-01)'); -- ok so far\nINSERT 0 1\nregression=# insert into temporal_rng values ('[1,2)', 'empty'); -- should fail and does\nERROR: range cannot be empty\nregression=# insert into temporal_rng values ('[1,2)', '[2010-01-01,2020-01-01)'); -- uh oh\nERROR: range cannot be empty\nregression=# truncate temporal_rng cascade;\nNOTICE: truncate cascades to table \"temporal_fk_rng2rng\"\nTRUNCATE TABLE\nregression=# insert into temporal_rng values ('[1,2)', '[2000-01-01,2010-01-01)'); -- ok so far\nINSERT 0 1\nregression=# insert into temporal_rng values ('[1,2)', '[2010-01-01,2020-01-01)'); -- ok now\nINSERT 0 1\n\nIt looks like the index is getting corrupted. 
Continuing from the above:\n\nregression=# create extension pageinspect;\nCREATE EXTENSION\nregression=# select gist_page_items(get_raw_page('temporal_rng_pk', 0), 'temporal_rng_pk');\n gist_page_items\n----------------------------------------------------------------------------\n (1,\"(0,1)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2000-01-01,2010-01-01)\"\")\")\n (2,\"(0,2)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2010-01-01,2020-01-01)\"\")\")\n(2 rows)\n\nregression=# insert into temporal_rng values ('[1,2)', 'empty');\nERROR: range cannot be empty\nregression=# select gist_page_items(get_raw_page('temporal_rng_pk', 0), 'temporal_rng_pk');\n gist_page_items\n----------------------------------------------------------------------------\n (1,\"(0,1)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2000-01-01,2010-01-01)\"\")\")\n (2,\"(0,2)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2010-01-01,2020-01-01)\"\")\")\n (3,\"(0,3)\",32,f,\"(id, valid_at)=(\"\"[1,2)\"\", empty)\")\n(3 rows)\n\nSo maybe this is a bad place to ereport? Or is this a deeper bug with GiST? 
Here is where we're \ndoing it:\n\n#0 range_nonempty_overlaps_internal (typcache=0x635a7fbf67f0, r1=0x635a7fc11f20, r2=0x635a7fc11f40) \nat rangetypes.c:876\n#1 0x0000635a7f06175d in range_gist_consistent_leaf_range (typcache=0x635a7fbf67f0, strategy=31, \nkey=0x635a7fc11f20, query=0x635a7fc11f40)\n at rangetypes_gist.c:1076\n#2 0x0000635a7f05fc9a in range_gist_consistent (fcinfo=0x7ffcd20f9f60) at rangetypes_gist.c:216\n#3 0x0000635a7f12d780 in FunctionCall5Coll (flinfo=0x635a7fb44eb8, collation=0, \narg1=140723832725648, arg2=109240340727454, arg3=31, arg4=0,\n arg5=140723832725567) at fmgr.c:1242\n#4 0x0000635a7e999af6 in gistindex_keytest (scan=0x635a7fb44d50, tuple=0x7d155c0a3fd0, \npage=0x7d155c0a2000 \"\", offset=1, recheck_p=0x7ffcd20fa129,\n recheck_distances_p=0x7ffcd20fa12a) at gistget.c:221\n#5 0x0000635a7e99a109 in gistScanPage (scan=0x635a7fb44d50, pageItem=0x7ffcd20fa1e0, \nmyDistances=0x0, tbm=0x0, ntids=0x0) at gistget.c:436\n#6 0x0000635a7e99a797 in gistgettuple (scan=0x635a7fb44d50, dir=ForwardScanDirection) at gistget.c:637\n#7 0x0000635a7e9e4d38 in index_getnext_tid (scan=0x635a7fb44d50, direction=ForwardScanDirection) at \nindexam.c:590\n#8 0x0000635a7e9e4f7d in index_getnext_slot (scan=0x635a7fb44d50, direction=ForwardScanDirection, \nslot=0x635a7fb44950) at indexam.c:682\n#9 0x0000635a7ec5690b in check_exclusion_or_unique_constraint (heap=0x7d1560cea348, \nindex=0x7d1560cedd98, indexInfo=0x635a7fb44c40, tupleid=0x635a7fb44580,\n values=0x7ffcd20faf00, isnull=0x7ffcd20faee0, estate=0x635a7fb434a0, newIndex=false, \nwaitMode=CEOUC_WAIT, violationOK=false, conflictTid=0x0)\n at execIndexing.c:780\n#10 0x0000635a7ec55c58 in ExecInsertIndexTuples (resultRelInfo=0x635a7fb43930, slot=0x635a7fb44550, \nestate=0x635a7fb434a0, update=false, noDupErr=false,\n specConflict=0x0, arbiterIndexes=0x0, onlySummarizing=false) at execIndexing.c:483\n#11 0x0000635a7eca38a2 in ExecInsert (context=0x7ffcd20fb1b0, resultRelInfo=0x635a7fb43930, 
\nslot=0x635a7fb44550, canSetTag=true, inserted_tuple=0x0,\n insert_destrel=0x0) at nodeModifyTable.c:1145\n\nIs there anything I can do to save this &&& idea? I've attached the patches I'm working with, \nrebased to cd85ae1114.\n\nIf ereport just won't work, then I might explore other definitions of a &&& operator. It was really \nnice to have such a clean solution.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Thu, 18 Jul 2024 11:39:09 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 7/18/24 11:39, Paul Jungwirth wrote:\n> So I swapped in the &&& patch, cleaned it up, and added tests. But something is wrong. After I get \n> one failure from an empty, I keep getting failures, even though the table is empty:\n> \n> regression=# truncate temporal_rng cascade;\n> NOTICE:  truncate cascades to table \"temporal_fk_rng2rng\"\n> TRUNCATE TABLE\n> regression=# insert into temporal_rng values ('[1,2)', '[2000-01-01,2010-01-01)'); -- ok so far\n> INSERT 0 1\n> regression=# insert into temporal_rng values ('[1,2)', 'empty'); -- should fail and does\n> ERROR:  range cannot be empty\n> regression=# insert into temporal_rng values ('[1,2)', '[2010-01-01,2020-01-01)'); -- uh oh\n> ERROR:  range cannot be empty\n> regression=# truncate temporal_rng cascade;\n> NOTICE:  truncate cascades to table \"temporal_fk_rng2rng\"\n> TRUNCATE TABLE\n> regression=# insert into temporal_rng values ('[1,2)', '[2000-01-01,2010-01-01)'); -- ok so far\n> INSERT 0 1\n> regression=# insert into temporal_rng values ('[1,2)', '[2010-01-01,2020-01-01)'); -- ok now\n> INSERT 0 1\n> \n> It looks like the index is getting corrupted. 
Continuing from the above:\n> \n> regression=# create extension pageinspect;\n> CREATE EXTENSION\n> regression=# select gist_page_items(get_raw_page('temporal_rng_pk', 0), 'temporal_rng_pk');\n>                               gist_page_items\n> ----------------------------------------------------------------------------\n>  (1,\"(0,1)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2000-01-01,2010-01-01)\"\")\")\n>  (2,\"(0,2)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2010-01-01,2020-01-01)\"\")\")\n> (2 rows)\n> \n> regression=# insert into temporal_rng values ('[1,2)', 'empty');\n> ERROR:  range cannot be empty\n> regression=# select gist_page_items(get_raw_page('temporal_rng_pk', 0), 'temporal_rng_pk');\n>                               gist_page_items\n> ----------------------------------------------------------------------------\n>  (1,\"(0,1)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2000-01-01,2010-01-01)\"\")\")\n>  (2,\"(0,2)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2010-01-01,2020-01-01)\"\")\")\n>  (3,\"(0,3)\",32,f,\"(id, valid_at)=(\"\"[1,2)\"\", empty)\")\n> (3 rows)\n\nI realized this isn't index corruption, just MVCC. The exclusion constraint is checked after we\nupdate the index, which is why the row gets left behind. But it doesn't cause any wrong answers, and\nif you vacuum the table the row goes away.\n\nThis also explains my confusion here:\n\n> I thought of a possible problem: this operator works great if there are already rows in the table, \n> but what if the *first row you insert* has an empty range? Then there is nothing to compare against, \n> so the operator will never be used. Right?\n> \n> Except when I test it, it still works!\n\nThe first row still does a comparison because when we check the exclusion constraint, there is a\ncomparison between the query and the key we just inserted. 
(When I say \"query\" I don't mean a SQL\nquery, but the value used to search the index that is compared against its keys.)\n\nSo I'm glad I didn't stumble on a GiST bug, but I think it means ereporting from an exclusion operator\nis not a workable approach. Failures leave behind invalid tuples, and future (valid) tuples can fail if\nwe compare to those invalid tuples. Since MVCC visibility is stored in the heap, not in the index, it's\nnot really accessible to us here. So far I don't have any ideas to rescue this idea, even though I like\nit a lot. So I will go back to the executor idea we discussed at pgconf.dev.\n\nOne tempting alternative though is to let exclusion constraints do the not-empty check, instead of\nputting it in the executor. It would be an extra check we do only when the constraint has\npg_constraint.conperiod. Then we don't need to add & maintain pg_class.relwithoutoverlaps, and we don't\nneed a relcache change, and we don't need so much extra code to check existing rows when you add the\nconstraint. It doesn't use the existing available exclusion constraint functionality, but if we're\nwilling to extend the executor to know about WITHOUT OVERLAPS, I guess we could teach exclusion\nconstraints about it instead. Doing the check there does seem to have better locality with the feature.\nSo I think I will try that out as well.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Tue, 23 Jul 2024 09:08:05 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Wed, Jul 24, 2024 at 12:08 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 7/18/24 11:39, Paul Jungwirth wrote:\n> > So I swapped in the &&& patch, cleaned it up, and added tests. But something is wrong. 
After I get\n> > one failure from an empty, I keep getting failures, even though the table is empty:\n> >\n> > regression=# truncate temporal_rng cascade;\n> > NOTICE: truncate cascades to table \"temporal_fk_rng2rng\"\n> > TRUNCATE TABLE\n> > regression=# insert into temporal_rng values ('[1,2)', '[2000-01-01,2010-01-01)'); -- ok so far\n> > INSERT 0 1\n> > regression=# insert into temporal_rng values ('[1,2)', 'empty'); -- should fail and does\n> > ERROR: range cannot be empty\n> > regression=# insert into temporal_rng values ('[1,2)', '[2010-01-01,2020-01-01)'); -- uh oh\n> > ERROR: range cannot be empty\n> > regression=# truncate temporal_rng cascade;\n> > NOTICE: truncate cascades to table \"temporal_fk_rng2rng\"\n> > TRUNCATE TABLE\n> > regression=# insert into temporal_rng values ('[1,2)', '[2000-01-01,2010-01-01)'); -- ok so far\n> > INSERT 0 1\n> > regression=# insert into temporal_rng values ('[1,2)', '[2010-01-01,2020-01-01)'); -- ok now\n> > INSERT 0 1\n> >\n> > It looks like the index is getting corrupted. 
Continuing from the above:\n> >\n> > regression=# create extension pageinspect;\n> > CREATE EXTENSION\n> > regression=# select gist_page_items(get_raw_page('temporal_rng_pk', 0), 'temporal_rng_pk');\n> > gist_page_items\n> > ----------------------------------------------------------------------------\n> > (1,\"(0,1)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2000-01-01,2010-01-01)\"\")\")\n> > (2,\"(0,2)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2010-01-01,2020-01-01)\"\")\")\n> > (2 rows)\n> >\n> > regression=# insert into temporal_rng values ('[1,2)', 'empty');\n> > ERROR: range cannot be empty\n> > regression=# select gist_page_items(get_raw_page('temporal_rng_pk', 0), 'temporal_rng_pk');\n> > gist_page_items\n> > ----------------------------------------------------------------------------\n> > (1,\"(0,1)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2000-01-01,2010-01-01)\"\")\")\n> > (2,\"(0,2)\",40,f,\"(id, valid_at)=(\"\"[1,2)\"\", \"\"[2010-01-01,2020-01-01)\"\")\")\n> > (3,\"(0,3)\",32,f,\"(id, valid_at)=(\"\"[1,2)\"\", empty)\")\n> > (3 rows)\n>\n> I realized this isn't index corruption, just MVCC. The exclusion constraint is checked after we\n> update the index, which is why the row gets left behind. But it doesn't cause any wrong answers, and\n> if you vacuum the table the row goes away.\n>\n> This also explains my confusion here:\n>\n> > I thought of a possible problem: this operator works great if there are already rows in the table,\n> > but what if the *first row you insert* has an empty range? Then there is nothing to compare against,\n> > so the operator will never be used. Right?\n> >\n> > Except when I test it, it still works!\n>\n> The first row still does a comparison because when we check the exclusion constraint, there is a\n> comparison between the query and the key we just inserted. 
(When I say \"query\" I don't mean a SQL\n> query, but the value used to search the index that is compared against its keys.)\n>\n> So I'm glad I didn't stumble on a GiST bug, but I think it means ereporting from an exclusion operator\n> is not a workable approach. Failures leave behind invalid tuples, and future (valid) tuples can fail if\n> we compare to those invalid tuples. Since MVCC visibility is stored in the heap, not in the index, it's\n> not really accessible to us here. So far I don't have any ideas to rescue this idea, even though I like\n> it a lot. So I will go back to the executor idea we discussed at pgconf.dev.\n>\n\nanother kind of crazy idea.\ninstead of \"ERROR: range cannot be empty\"\nlet it return true.\nso 'empty'::int4range &&& 'empty'; return true.\n\none downside is, if your first row period column is empty, then you\ncan not insert any new rows\nthat have the same non-period key column.\n\nfor example:\ndrop table if exists temporal_rng1 ;\nCREATE TABLE temporal_rng1 (\n id int4range,\n valid_at int4range,\n CONSTRAINT temporal_rng1_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS)\n);\ninsert into temporal_rng1 values ('[1,2]', 'empty');\n\nIn this context, now, you cannot insert any new rows whose id is equal\nto '[1,2]'.\n\n\n----but if your first row is not empty, then you won't have empty.\ntruncate temporal_rng1;\ninsert into temporal_rng1 values ('[1,2]', '[3,4]');\n\nthen\ninsert into temporal_rng1 values ('[1,2]', 'empty'); --will fail.\n\n\nIn summary, you will have exactly one empty, no other values (if the\nfirst row is empty).\nor you will have values and not empty values at all.\n\n\n", "msg_date": "Thu, 25 Jul 2024 13:57:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 7/23/24 09:08, Paul Jungwirth wrote:\n> One tempting alternative though is to let exclusion constraints do the not-empty check, instead of\n> putting it 
in the executor. It would be an extra check we do only when the constraint has\n> pg_constraint.conperiod. Then we don't need to add & maintain pg_class.relwithoutoverlaps, and we don't\n> need a relcache change, and we don't need so much extra code to check existing rows when you add the\n> constraint. It doesn't use the existing available exclusion constraint functionality, but if we're\n> willing to extend the executor to know about WITHOUT OVERLAPS, I guess we could teach exclusion\n> constraints about it instead. Doing the check there does seem to have better locality with the feature.\n> So I think I will try that out as well.\n\nHere is a patch moving the not-empty check into check_exclusion_or_unique_constraint. That is a more \nlogical place for it than ExecConstraints, since WITHOUT OVERLAPS is part of the index constraint \n(not a CHECK constraint). At that point we've already looked up all the information we need. So \nthere is no extra cost for non-temporal tables, and no need to change pg_class or add to the \nrelcache. Also putting it there means we don't need any extra code to enforce non-empties when we \nbuild the index or do anything else with it.\n\nI think this is the nicest solution we can expect. It is even cleaner than the &&& ideas. So \nhopefully this gets us back to where we were when we decided to commit PKs & FKs to v17.\n\nAs before, I've left the nonempty check as a separate patch to make reviewing easier, but when \ncommitting I would squash it with the PK patch.\n\nRebased to 05faf06e9c.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Thu, 25 Jul 2024 08:52:44 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 7/25/24 08:52, Paul Jungwirth wrote:\n> Here is a patch moving the not-empty check into check_exclusion_or_unique_constraint. 
That is a more \n> logical place for it than ExecConstraints, since WITHOUT OVERLAPS is part of the index constraint \n> (not a CHECK constraint). At that point we've already looked up all the information we need. So \n> there is no extra cost for non-temporal tables, and no need to change pg_class or add to the \n> relcache. Also putting it there means we don't need any extra code to enforce non-empties when we \n> build the index or do anything else with it.\n> \n> I think this is the nicest solution we can expect. It is even cleaner than the &&& ideas. So \n> hopefully this gets us back to where we were when we decided to commit PKs & FKs to v17.\n> \n> As before, I've left the nonempty check as a separate patch to make reviewing easier, but when \n> committing I would squash it with the PK patch.\n\nHello,\n\nHere is an updated set of patches, rebased because the old patches no longer applied.\n\nAlso I have a question about foreign key RESTRICT behavior and the SQL spec.\n\nI added some tests for a particular condition:\nthere are two adjacent referenced rows (sharing a scalar key part),\nand a single referencing row whose time spans the transition between the referenced rows.\nSo graphing the records on a timeline, they look like this:\n\nPK: |-----|-----|\nFK: |-----|\n\nNow suppose you simultaneously update both referenced rows to be like so:\n\nPK: |---------|-|\nFK: |-----|\n\nNote that the FK's condition is still fulfilled.\n\nIn a NO ACTION constraint, we clearly should not raise an error (and we don't).\n\nIn a RESTRICT constraint, we *do* raise an error (but maybe we shouldn't).\n\nHere is some specific SQL (added to the tests in these patches):\n\n-- A PK update sliding the edge between two referenced rows:\nINSERT INTO temporal_rng (id, valid_at) VALUES\n ('[6,7)', daterange('2018-01-01', '2018-02-01')),\n ('[6,7)', daterange('2018-02-01', '2018-03-01'));\nINSERT INTO temporal_fk_rng2rng (id, valid_at, parent_id) VALUES\n ('[4,5)', 
daterange('2018-01-15', '2018-02-15'), '[6,7)');\nUPDATE temporal_rng\nSET valid_at = CASE WHEN lower(valid_at) = '2018-01-01'\n THEN daterange('2018-01-01', '2018-01-05')\n WHEN lower(valid_at) = '2018-02-01'\n THEN daterange('2018-01-05', '2018-03-01') END\nWHERE id = '[6,7)';\n\nor if you prefer PERIODs:\n\n-- A PK update sliding the edge between two referenced rows:\nINSERT INTO temporal_per (id, valid_from, valid_til) VALUES\n ('[6,7)', '2018-01-01', '2018-02-01'),\n ('[6,7)', '2018-02-01', '2018-03-01');\nINSERT INTO temporal_fk_per2per (id, valid_from, valid_til, parent_id) VALUES\n ('[4,5)', '2018-01-15', '2018-02-15', '[6,7)');\nUPDATE temporal_per\nSET valid_from = CASE WHEN valid_from = '2018-01-01' THEN '2018-01-01'\n WHEN valid_from = '2018-02-01' THEN '2018-01-05' END::date,\n valid_til = CASE WHEN valid_from = '2018-01-01' THEN '2018-01-05'\n WHEN valid_from = '2018-02-01' THEN '2018-03-01' END::date\nWHERE id = '[6,7)';\n\nHere is what the SQL:2011 spec says (section 4.18.3.3 from Part 2 Foundation):\n\n > ON UPDATE RESTRICT: any change to a referenced column in the referenced table is prohibited if \nthere is a matching row.\n\nSo that says we should raise an error.\nBut it seems clearly written with only non-temporal constraints in mind.\nIs it really correct in the scenario above? The reference is still valid.\nDoes anyone know if the text has been updated in more recent versions of the standard?\n\nPart of me is happy the standard says this, because not raising an error is harder to implement.\nMaybe a lot harder.\n\nOn the other hand, what if we have just one row in each table, and we *expand* the referenced range? \nIn other words, from this:\n\nPK: |-----|\nFK: |-|\n\nto this:\n\nPK: |-------|\nFK: |-|\n\nShould that raise an error too? Currently it does not.\n\nBut I think that is correct. 
As usual I go back to Date's model about \"one row per millisecond\".\nThe referenced milliseconds didn't get updated, only the unreferenced ones.\nSo I think what we are doing is okay.\n\nLikewise that same principle indicates we are doing the right thing in the original case:\nwe did update the referenced milliseconds.\nEven though we swapped in replacements, we have to raise an error.\nThis is no different than the non-temporal case.\n\nSo my conclusion is we are doing the right thing in all places.\nBut here is an opportunity for people to disagree. :-)\n\nRebased to f5f30c22ed.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Thu, 1 Aug 2024 10:09:03 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Fri, Aug 2, 2024 at 1:09 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> On 7/25/24 08:52, Paul Jungwirth wrote:\n> > Here is a patch moving the not-empty check into check_exclusion_or_unique_constraint. That is a more\n> > logical place for it than ExecConstraints, since WITHOUT OVERLAPS is part of the index constraint\n> > (not a CHECK constraint). At that point we've already looked up all the information we need. So\n> > there is no extra cost for non-temporal tables, and no need to change pg_class or add to the\n> > relcache. Also putting it there means we don't need any extra code to enforce non-empties when we\n> > build the index or do anything else with it.\n> >\n> > I think this is the nicest solution we can expect. It is even cleaner than the &&& ideas. 
So\n> > hopefully this gets us back to where we were when we decided to commit PKs & FKs to v17.\n> >\n> > As before, I've left the nonempty check as a separate patch to make reviewing easier, but when\n> > committing I would squash it with the PK patch.\n>\n> Hello,\n>\n> Here is an updated set of patches, rebased because the old patches no longer applied.\n>\n\nvoid\nExecWithoutOverlapsNotEmpty(Relation rel, Datum attval, Oid typtype,\nOid atttypid);\n\nshould this just be a static function?\nI am not so sure.\n\nOid typtype\nshould be\nchar typtype\n?\n\n errmsg(\"new row for relation \\\"%s\\\" contains empty\nWITHOUT OVERLAPS value\",\nwe already have Form_pg_attribute via \"TupleDesc tupdesc =\nRelationGetDescr(heap);\"\nwe can make the error message be:\n errmsg(\"cannot be empty range value for WITHOUT\nOVERLAPS column \\\"%s\\\" in relation \\\"%s\\\", colname,\nRelationGetRelationName(rel))\n\n\nelog(ERROR, \"Got unknown type for WITHOUT OVERLAPS column: %d\", atttypid);\npeople will wonder if domain over range works or not. but currently\nnot, better error message would be:\n elog(ERROR, \"WITHOUT OVERLAPS column \\\"%s\\\" is not a range\nor multirange type \", colname);\nThis part is unlikely to be reachable, so I don't have a strong opinion on it.\n\n\n+ if (!found)\n+ column = NULL;\nthis part no need?\nbecause if not found, the column would be last element in ColumnDef\ntype list columns\nalso the following change also make sense:\n\n+ if (!OidIsValid(typid) && column)\n+ typid = typenameTypeId(NULL, column->typeName);\n\n\n+ /* The WITHOUT OVERLAPS part (if any) must be a range or multirange type. 
*/\n+ if (constraint->without_overlaps && lc == list_last_cell(constraint->keys))\n+ {\n+ if (!found && cxt->isalter)\n+ {\n+ /*\n+ * Look up the column type on existing table.\n+ * If we can't find it, let things fail in DefineIndex.\n+ */\n+ Relation rel = cxt->rel;\n+ for (int i = 0; i < rel->rd_att->natts; i++)\n+ {\n+ Form_pg_attribute attr = TupleDescAttr(rel->rd_att, i);\n+ const char *attname;\n+\n+ if (attr->attisdropped)\n+ break;\n+\n+ attname = NameStr(attr->attname);\n+ if (strcmp(attname, key) == 0)\n+ {\n+ typid = attr->atttypid;\n+ break;\n+ }\n+ }\n+ }\n+ if (found)\n+{\n+}\n\nI am confused with this change?\nyou found out the typid,but didn't using this information, should it be\n+ if (strcmp(attname, key) == 0)\n+ {\n+ typid = attr->atttypid;\n+ found = true;\n+ break;\n+ }\n\nso the failing error message be same for the following two cases:\nCREATE TABLE t1 (id int4range,valid_at tsrange,b text,\n CONSTRAINT temporal_rng_pk PRIMARY KEY (id, b WITHOUT OVERLAPS)\n);\n\nCREATE TABLE t1 (id int4range,valid_at tsrange,b text);\nalter table t1 add CONSTRAINT temporal_rng_pk PRIMARY KEY (id, b\nWITHOUT OVERLAPS);\n\n\n", "msg_date": "Tue, 6 Aug 2024 10:02:13 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Tue, Aug 6, 2024 at 10:02 AM jian he <jian.universality@gmail.com> wrote:\n>\n> On Fri, Aug 2, 2024 at 1:09 AM Paul Jungwirth\n> <pj@illuminatedcomputing.com> wrote:\n> >\n> > On 7/25/24 08:52, Paul Jungwirth wrote:\n> > > Here is a patch moving the not-empty check into check_exclusion_or_unique_constraint. That is a more\n> > > logical place for it than ExecConstraints, since WITHOUT OVERLAPS is part of the index constraint\n> > > (not a CHECK constraint). At that point we've already looked up all the information we need. So\n> > > there is no extra cost for non-temporal tables, and no need to change pg_class or add to the\n> > > relcache. 
Also putting it there means we don't need any extra code to enforce non-empties when we\n> > > build the index or do anything else with it.\n> > >\n> > > I think this is the nicest solution we can expect. It is even cleaner than the &&& ideas. So\n> > > hopefully this gets us back to where we were when we decided to commit PKs & FKs to v17.\n> > >\n> > > As before, I've left the nonempty check as a separate patch to make reviewing easier, but when\n> > > committing I would squash it with the PK patch.\n> >\n> > Hello,\n> >\n> > Here is an updated set of patches, rebased because the old patches no longer applied.\n> >\n\nhi. some minor issues.\n\nin generateClonedIndexStmt\nindex->iswithoutoverlaps = (idxrec->indisprimary ||\nidxrec->indisunique) && idxrec->indisexclusion;\nthis case, the index accessMethod will be \"gist\" only?\n\ndo you think it's necessary to:\nindex->iswithoutoverlaps = (idxrec->indisprimary ||\nidxrec->indisunique) && idxrec->indisexclusion\n&& strcmp(index->accessMethod, \"gist\") == 0);\n\n\nsrc/bin/pg_dump/pg_dump.c and src/bin/psql/describe.c\nshould be \"if (pset.sversion >= 180000)\"?\n\n\n+ (This is sometimes called a\n+ temporal key, if the column is a range of dates or timestamps, but\n+ PostgreSQL allows ranges over any base type.)\n\nPostgreSQL should be decorated as\n<productname>PostgreSQL</productname>\n?\n\n\n\nin DefineIndex we have:\nif (stmt->unique && !stmt->iswithoutoverlaps && !amRoutine->amcanunique)\nif (stmt->indexIncludingParams != NIL && !amRoutine->amcaninclude)\nif (numberOfKeyAttributes > 1 && !amRoutine->amcanmulticol)\nif (exclusion && amRoutine->amgettuple == NULL)\n\nmaybe we can add:\n if (stmt->iswithoutoverlaps && strcmp(accessMethodName, \"gist\") != 0)\n ereport(ERROR,\n (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),\n errmsg(\"access method \\\"%s\\\" does not support WITHOUT\nOVERLAPS constraints\",\n accessMethodName)));\n\n\n\n+ /* exclusionOpNames can be non-NIL if we are creating a partition */\n+ if 
(iswithoutoverlaps && exclusionOpNames == NIL)\n+ {\n+ indexInfo->ii_ExclusionOps = palloc_array(Oid, nkeycols);\n+ indexInfo->ii_ExclusionProcs = palloc_array(Oid, nkeycols);\n+ indexInfo->ii_ExclusionStrats = palloc_array(uint16, nkeycols);\n+ }\nthe comment is not 100% correct, i think.\ncreating a partition, \"create table like INCLUDING ALL\", both will go\nthrough generateClonedIndexStmt.\ngenerateClonedIndexStmt will produce exclusionOpNames if this index\nsupports exclusion constraint.\n\n\n", "msg_date": "Tue, 6 Aug 2024 22:50:00 +0800", "msg_from": "jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "Here are some fixes based on outstanding feedback (some old some new). Details below:\n\nOn 3/25/24 17:00, jian he wrote:\n > hi.\n > minor issues I found in v33-0003.\n > there are 29 of {check_amproc_signature?.*false}\n > only one {check_amproc_signature(procform->amproc, opcintype, true}\n > is this refactoring really worth it?\n\nI could add a separate function, for example check_amproc_retset_signature, but it would require \nduplicating almost the whole existing function, so a param seems better here.\n\n > We also need to refactor gistadjustmembers?\n\nYou're right, added the new support procs there.\n\n > + <row>\n > + <entry><function>intersect</function></entry>\n > + <entry>computes intersection with <literal>FOR PORTION OF</literal>\n > + bounds</entry>\n > + <entry>13</entry>\n > + </row>\n > + <row>\n > + <entry><function>without_portion</function></entry>\n > + <entry>computes remaining duration(s) outside\n > + <literal>FOR PORTION OF</literal> bounds</entry>\n > + <entry>14</entry>\n > + </row>\n > needs to add \"(optional)\".\n\nAdded.\n\n > +<programlisting>\n > +Datum\n > +my_range_intersect(PG_FUNCTION_ARGS)\n > +{\n > + RangeType *r1 = PG_GETARG_RANGE_P(0);\n > + RangeType *r2 = PG_GETARG_RANGE_P(1);\n > + TypeCacheEntry *typcache;\n > +\n > + /* Different 
types should be prevented by ANYRANGE matching rules */\n > + if (RangeTypeGetOid(r1) != RangeTypeGetOid(r2))\n > elog(ERROR, \"range\n > types do not match\");\n > +\n > + typcache = range_get_typcache(fcinfo, RangeTypeGetOid(r1));\n > +\n > + PG_RETURN_RANGE_P(range_intersect_internal(typcache, r1, r2));\n > +}\n > +</programlisting>\n > the elog, ERROR indentation is wrong?\n\nFixed.\n\n > +/*\n > + * range_without_portion_internal - Sets outputs and outputn to the ranges\n > + * remaining and their count (respectively) after subtracting r2 from r1.\n > + * The array should never contain empty ranges.\n > + * The outputs will be ordered. We expect that outputs is an array of\n > + * RangeType pointers, already allocated with two slots.\n > + */\n > +void\n > +range_without_portion_internal(TypeCacheEntry *typcache, RangeType *r1,\n > + RangeType *r2, RangeType **outputs, int *outputn)\n > the comments need to be refactored?\n > there is nothing related to \"slot\"?\n > not sure the \"array\" description is right.\n > (my understanding is compute rangetype r1 and r2, and save the result to\n > RangeType **outputs.\n\nChanged \"slots\" to \"elements\". Everything else looks correct to me.\n\n > select proisstrict, proname from pg_proc where proname =\n > 'range_without_portion';\n > range_without_portion is strict.\n > but\n > select range_without_portion(NULL::int4range, int4range(11, 20,'[]'));\n > return zero rows.\n > Is this the expected behavior?\n\nReturning zero rows is correct if the function is never called (which is what strict does).\nI see other strict retset functions, e.g. 
json_array_elements.\nThat also returns zero rows if you say SELECT json_array_elements(NULL);\n\nOn 4/14/24 17:00, jian he wrote:\n > for unique index, primary key:\n > ii_ExclusionOps, ii_UniqueOps is enough to distinguish this index\n > support without overlaps,\n > we don't need another ii_HasWithoutOverlaps?\n > (i didn't test it though)\n\nI think it is worth having something named. But also ii_Exclusion is not set in \nindex_concurrently_create_copy, so inferring when we have WITHOUT OVERLAPS will not work in that case.\n\n > ON CONFLICT DO NOTHING\n > ON CONFLICT (id, valid_at) DO NOTHING\n > ON CONFLICT ON CONSTRAINT temporal_rng_pk DO NOTHING\n > I am confused by the test.\n > here temporal_rng only has one primary key, ON CONFLICT only deals with it.\n > I thought these three are the same thing?\n\nThey all have somewhat different code paths in infer_arbiter_indexes, and they mean different \nthings. I recall when I first started dealing with empty ranges several of these test cases caught \ndifferent bugs (as well as the DO UPDATE cases).\n\nOn 8/5/24 19:02, jian he wrote:\n > void\n > ExecWithoutOverlapsNotEmpty(Relation rel, Datum attval, Oid typtype,\n > Oid atttypid);\n >\n > should this just be a static function?\n > I am not so sure.\n\nChanged. In a previous version I was calling this from two places, but I'm not anymore.\n\n > Oid typtype\n > should be\n > char typtype\n > ?\n\nOops, you're right! Fixed.\n\n > errmsg(\"new row for relation \\\"%s\\\" contains empty\n > WITHOUT OVERLAPS value\",\n > we already have Form_pg_attribute via \"TupleDesc tupdesc =\n > RelationGetDescr(heap);\"\n > we can make the error message be:\n > errmsg(\"cannot be empty range value for WITHOUT\n > OVERLAPS column \\\"%s\\\" in relation \\\"%s\\\", colname,\n > RelationGetRelationName(rel))\n\nYes, it's nicer to report the column name. 
Changed.\n\n > elog(ERROR, \"Got unknown type for WITHOUT OVERLAPS column: %d\", atttypid);\n > people will wonder if domain over range works or not. but currently\n > not, better error message would be:\n > elog(ERROR, \"WITHOUT OVERLAPS column \\\"%s\\\" is not a range\n > or multirange type \", colname);\n > This part is unlikely to be reachable, so I don't have a strong opinion on it.\n\nLikewise.\n\n > + if (!found)\n > + column = NULL;\n > this part no need?\n > because if not found, the column would be last element in ColumnDef\n > type list columns\n\nWe can later set `found` to true from inheritance (or it being a system column), and then `column` \nis set but wrong. So setting `column` to null seems generally clearer. But concretely, I use \n`column` below to give me the type (which I otherwise don't have in CREATE TABLE), so I can forbid \ntypes other than range and multirange.\n\n > also the following change also make sense:\n >\n > + if (!OidIsValid(typid) && column)\n > + typid = typenameTypeId(NULL, column->typeName);\n\nThis is because in CREATE TABLE I need to get the type from the `column` variable.\n\n > I am confused with this change?\n > you found out the typid,but didn't using this information, should it be\n > + if (strcmp(attname, key) == 0)\n > + {\n > + typid = attr->atttypid;\n > + found = true;\n > + break;\n > + }\n\nYes. Actually that is in the PERIOD patch file, but it should be in Forbid-empty-ranges. 
Moved.\n\n > so the failing error message be same for the following two cases:\n > CREATE TABLE t1 (id int4range,valid_at tsrange,b text,\n > CONSTRAINT temporal_rng_pk PRIMARY KEY (id, b WITHOUT OVERLAPS)\n > );\n >\n > CREATE TABLE t1 (id int4range,valid_at tsrange,b text);\n > alter table t1 add CONSTRAINT temporal_rng_pk PRIMARY KEY (id, b\n > WITHOUT OVERLAPS);\n\nI think the same error message is the right thing to do here.\nIt looks like that's what we're doing.\nIf I've misunderstand what you want, can you clarify?\n\nOn 8/6/24 07:50, jian he wrote:\n > in generateClonedIndexStmt\n > index->iswithoutoverlaps = (idxrec->indisprimary ||\n > idxrec->indisunique) && idxrec->indisexclusion;\n > this case, the index accessMethod will be \"gist\" only?\n >\n > do you think it's necessary to:\n > index->iswithoutoverlaps = (idxrec->indisprimary ||\n > idxrec->indisunique) && idxrec->indisexclusion\n > && strcmp(index->accessMethod, \"gist\") == 0);\n\nThis doesn't seem necessary, and maybe we'll support non-gist someday, when this condition would be \nmisleading.\n\n > src/bin/pg_dump/pg_dump.c and src/bin/psql/describe.c\n > should be \"if (pset.sversion >= 180000)\"?\n\nAh, thanks. Changing these from 170000 also landed in the wrong patch file. 
Fixed.\n\n > + (This is sometimes called a\n > + temporal key, if the column is a range of dates or timestamps, but\n > + PostgreSQL allows ranges over any base type.)\n >\n > PostgreSQL should be decorated as\n > <productname>PostgreSQL</productname>\n\nDone.\n\n > in DefineIndex we have:\n > if (stmt->unique && !stmt->iswithoutoverlaps && !amRoutine->amcanunique)\n > if (stmt->indexIncludingParams != NIL && !amRoutine->amcaninclude)\n > if (numberOfKeyAttributes > 1 && !amRoutine->amcanmulticol)\n > if (exclusion && amRoutine->amgettuple == NULL)\n >\n > maybe we can add:\n > if (stmt->iswithoutoverlaps && strcmp(accessMethodName, \"gist\") != 0)\n > ereport(ERROR,\n > (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),\n > errmsg(\"access method \\\"%s\\\" does not support WITHOUT\n > OVERLAPS constraints\",\n > accessMethodName)));\n\nOkay.\n\n > + /* exclusionOpNames can be non-NIL if we are creating a partition */\n > + if (iswithoutoverlaps && exclusionOpNames == NIL)\n > + {\n > + indexInfo->ii_ExclusionOps = palloc_array(Oid, nkeycols);\n > + indexInfo->ii_ExclusionProcs = palloc_array(Oid, nkeycols);\n > + indexInfo->ii_ExclusionStrats = palloc_array(uint16, nkeycols);\n > + }\n > the comment is not 100% correct, i think.\n > creating a partition, \"create table like INCLUDING ALL\", both will go\n > through generateClonedIndexStmt.\n > generateClonedIndexStmt will produce exclusionOpNames if this index\n > supports exclusion constraint.\n\nI think the comment is correct, but non-NIL is a confusing double negative, and it's not clear that \nthe comment is giving the motivation for the second half of the condition.\nI re-wrote it to be more clear. 
I also adjusted the `if` to avoid parsing operator names when not \nneeded.\n\nRebased to e56ccc8e42.\n\nYours,\n\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com", "msg_date": "Wed, 7 Aug 2024 13:54:52 -0700", "msg_from": "Paul Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, Aug 8, 2024 at 4:54 AM Paul Jungwirth\n<pj@illuminatedcomputing.com> wrote:\n>\n> Rebased to e56ccc8e42.\n\nI only applied to 0001-0003.\nin create_table.sgml, I saw the WITHOUT OVERLAPS change is mainly in\ntable_constraint.\nbut we didn't touch alter_table.sgml.\nDo we also need to change alter_table.sgml correspondingly?\n\n\n+ if (constraint->without_overlaps)\n+ {\n+ /*\n+ * This enforces that there is at least one equality column\n+ * besides the WITHOUT OVERLAPS columns. This is per SQL\n+ * standard. XXX Do we need this?\n+ */\n+ if (list_length(constraint->keys) < 2)\n+ ereport(ERROR,\n+ errcode(ERRCODE_SYNTAX_ERROR),\n+ errmsg(\"constraint using WITHOUT OVERLAPS needs at least two columns\"));\n+\n+ /* WITHOUT OVERLAPS requires a GiST index */\n+ index->accessMethod = \"gist\";\n+ }\nif Constraint->conname is not NULL, we can\n+ errmsg(\"constraint \\\"%s\\\" using WITHOUT OVERLAPS needs at least two\ncolumns\"));\n\n\"XXX Do we need this?\"\nI think currently we need this, otherwise the following create_table\nsynopsis will not be correct.\nUNIQUE [ NULLS [ NOT ] DISTINCT ] ( column_name [, ... ] [,\ncolumn_name WITHOUT OVERLAPS ] )\nPRIMARY KEY ( column_name [, ... 
] [, column_name WITHOUT OVERLAPS ] )\n\n\nwe add a column in catalog-pg-constraint.\ndo we need change column conexclop,\n\"If an exclusion constraint, list of the per-column exclusion operators \"\nbut currently, primary key, unique constraint both have valid conexclop.\n\n\n+static void\n+ExecWithoutOverlapsNotEmpty(Relation rel, NameData attname, Datum\nattval, char typtype, Oid atttypid)\n+{\n+ bool isempty;\n+ RangeType *r;\n+ MultirangeType *mr;\n+\n+ switch (typtype)\n+ {\n+ case TYPTYPE_RANGE:\n+ r = DatumGetRangeTypeP(attval);\n+ isempty = RangeIsEmpty(r);\n+ break;\n+ case TYPTYPE_MULTIRANGE:\n+ mr = DatumGetMultirangeTypeP(attval);\n+ isempty = MultirangeIsEmpty(mr);\n+ break;\n+ default:\n+ elog(ERROR, \"WITHOUT OVERLAPS column \\\"%s\\\" is not a range or multirange\",\n+ NameStr(attname));\n+ }\n+\n+ /* Report a CHECK_VIOLATION */\n+ if (isempty)\n+ ereport(ERROR,\n+ (errcode(ERRCODE_CHECK_VIOLATION),\n+ errmsg(\"empty WITHOUT OVERLAPS value found in column \\\"%s\\\" in\nrelation \\\"%s\\\"\",\n+ NameStr(attname), RelationGetRelationName(rel))));\n+}\nI think in the default branch, you need at least set the isempty\nvalue, otherwise maybe there will be a compiler warning\nbecause later your use isempty, but via default branch is value undefined?\n\n\n+ /*\n+ * If this is a WITHOUT OVERLAPS constraint,\n+ * we must also forbid empty ranges/multiranges.\n+ * This must happen before we look for NULLs below,\n+ * or a UNIQUE constraint could insert an empty\n+ * range along with a NULL scalar part.\n+ */\n+ if (indexInfo->ii_WithoutOverlaps)\n+ {\n+ ExecWithoutOverlapsNotEmpty(heap, att->attname,\n+ }\npreviously we found out that if this happens later, then it won't work.\nbut this comment didn't explain why this must have happened earlier.\nI didn't dig deep enough to find out why.\nbut explaining it would be very helpful.\n\n\nI think some tests are duplicated, so I did the refactoring.", "msg_date": "Fri, 16 Aug 2024 10:12:00 +0800", "msg_from": 
"jian he <jian.universality@gmail.com>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 07.08.24 22:54, Paul Jungwirth wrote:\n> Here are some fixes based on outstanding feedback (some old some new). \n\nI have studied your patches v39-0001 through v39-0004, which correspond \nto what had been reverted plus the new empty range check plus various \nminor fixes. This looks good to me now, so I propose to go ahead with that.\n\nBtw., in your 0003 you point out that this prevents using the WITHOUT \nOVERLAPS functionality for non-range types. But I think this could be \naccomplished by adding an \"is empty\" callback as a support function or \nsomething like that. I'm not suggesting to do that here, but it might \nbe worth leaving a comment about that possibility.\n\n\n\n", "msg_date": "Thu, 5 Sep 2024 14:09:49 +0200", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Thu, Sep 5, 2024 at 5:09 AM Peter Eisentraut <peter@eisentraut.org> wrote:\n>\n> On 07.08.24 22:54, Paul Jungwirth wrote:\n> > Here are some fixes based on outstanding feedback (some old some new).\n>\n> I have studied your patches v39-0001 through v39-0004, which correspond\n> to what had been reverted plus the new empty range check plus various\n> minor fixes. This looks good to me now, so I propose to go ahead with that.\n\nSounds good. Thanks!\n\n> Btw., in your 0003 you point out that this prevents using the WITHOUT\n> OVERLAPS functionality for non-range types. But I think this could be\n> accomplished by adding an \"is empty\" callback as a support function or\n> something like that. I'm not suggesting to do that here, but it might\n> be worth leaving a comment about that possibility.\n\nYes, I was thinking the same. Agreed as well: it should be a follow-up\npatch, not needed for the base functionality. 
If we wanted a more\ngeneric name it could be \"canWithoutOverlap\" instead of \"[!]isempty\",\nbut even \"isempty\" is probably still completely accurate.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Thu, 5 Sep 2024 06:45:44 -0700", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On Mon, Feb 12, 2024 at 3:55 AM Peter Eisentraut <peter@eisentraut.org> wrote:\n> Have you checked that the generated queries can use indexes and have\n> suitable performance? Do you have example execution plans maybe?\n\nThis took longer than expected, but I wrote a long blog post about it\nhere: https://illuminatedcomputing.com/posts/2024/09/benchmarking-temporal-foreign-keys/\n\nThe short answer is that yes we use the index, and the query plan is\nreasonable. I compared performance against two alternate\nimplementations, and range_agg was fastest most of the time. When you\nhave a lot of invalid FK checks, the implementation in Snodgrass's\nbook wins, because it can short-circuit the plan and return a false\nresult without executing most of it. But that seems like an unusual\nsituation, and we should optimize for mostly-valid FK checks instead.\n\nThere are some more experiments I'd like to do (see the end of that\npost), but for now I plan to prioritize getting the FOR PORTION OF\npatch ready to commit. But if there is anything you'd like to know\nmore urgently, let me know.\n\nYours,\n\n-- \nPaul ~{:-)\npj@illuminatedcomputing.com\n\n\n", "msg_date": "Sun, 15 Sep 2024 21:12:34 -0500", "msg_from": "Paul A Jungwirth <pj@illuminatedcomputing.com>", "msg_from_op": true, "msg_subject": "Re: SQL:2011 application time" }, { "msg_contents": "On 05.09.24 14:09, Peter Eisentraut wrote:\n> On 07.08.24 22:54, Paul Jungwirth wrote:\n>> Here are some fixes based on outstanding feedback (some old some new). 
\n> \n> I have studied your patches v39-0001 through v39-0004, which correspond \n> to what had been reverted plus the new empty range check plus various \n> minor fixes.  This looks good to me now, so I propose to go ahead with \n> that.\n> \n> Btw., in your 0003 you point out that this prevents using the WITHOUT \n> OVERLAPS functionality for non-range types.  But I think this could be \n> accomplished by adding an \"is empty\" callback as a support function or \n> something like that.  I'm not suggesting to do that here, but it might \n> be worth leaving a comment about that possibility.\n\nI have committed these, as explained here.\n\nI look forward to an updated patch set from you to review the \"FOR \nPORTION OF\" patches next.\n\n\n\n", "msg_date": "Tue, 17 Sep 2024 11:45:52 +0200", "msg_from": "Peter Eisentraut <peter@eisentraut.org>", "msg_from_op": false, "msg_subject": "Re: SQL:2011 application time" } ]
[ { "msg_contents": "Hi,\n\nFiltering of columns at the publisher node will allow for selective\nreplication of data between publisher and subscriber. In case the updates\non the publisher are targeted only towards specific columns, the user will\nhave an option to reduce network consumption by not sending the data\ncorresponding to new columns that do not change. Note that replica\nidentity values will always be sent irrespective of column filtering settings.\nThe column values that are not sent by the publisher will be populated\nusing local values on the subscriber. For insert command, non-replicated\ncolumn values will be NULL or the default.\nIf column names are not specified while creating or altering a publication,\nall the columns are replicated as per current behaviour.\n\nThe proposal for syntax to add table with column names to publication is as\nfollows:\nCreate publication:\n\nCREATE PUBLICATION <pub_name> [ FOR TABLE [ONLY] table_name [(colname\n[,…])] | FOR ALL TABLES]\n\n\nAlter publication:\n\nALTER PUBLICATION <pub_name> ADD TABLE [ONLY] table_name [(colname [, ..])]\n\n\nPlease find attached a patch that implements the above proposal.\nWhile the patch contains basic implementation and tests, several\nimprovements\nand sanity checks are underway. I will post an updated patch with those\nchanges soon.\n\nKindly let me know your opinion.\n\n\nThank you,\n\nRahila Syed", "msg_date": "Thu, 1 Jul 2021 01:06:11 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Jul 1, 2021 at 1:06 AM Rahila Syed <rahilasyed90@gmail.com> wrote:\n>\n> Hi,\n>\n> Filtering of columns at the publisher node will allow for selective replication of data between publisher and subscriber. 
In case the updates on the publisher are targeted only towards specific columns, the user will have an option to reduce network consumption by not sending the data corresponding to new columns that do not change. Note that replica identity values will always be sent irrespective of column filtering settings. The column values that are not sent by the publisher will be populated using local values on the subscriber. For insert command, non-replicated column values will be NULL or the default.\n> If column names are not specified while creating or altering a publication,\n> all the columns are replicated as per current behaviour.\n>\n> The proposal for syntax to add table with column names to publication is as follows:\n> Create publication:\n>\n> CREATE PUBLICATION <pub_name> [ FOR TABLE [ONLY] table_name [(colname [,…])] | FOR ALL TABLES]\n>\n>\n> Alter publication:\n>\n> ALTER PUBLICATION <pub_name> ADD TABLE [ONLY] table_name [(colname [, ..])]\n>\n>\n> Please find attached a patch that implements the above proposal.\n> While the patch contains basic implementation and tests, several improvements\n> and sanity checks are underway. I will post an updated patch with those changes soon.\n>\n> Kindly let me know your opinion.\n>\n\nI haven't looked into the patch yet but +1 for the idea.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Thu, 1 Jul 2021 11:50:36 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Jul 1, 2021 at 1:06 AM Rahila Syed <rahilasyed90@gmail.com> wrote:\n>\n> Hi,\n>\n> Filtering of columns at the publisher node will allow for selective replication of data between publisher and subscriber. 
In case the updates on the publisher are targeted only towards specific columns, the user will have an option to reduce network consumption by not sending the data corresponding to new columns that do not change. Note that replica identity values will always be sent irrespective of column filtering settings. The column values that are not sent by the publisher will be populated using local values on the subscriber. For insert command, non-replicated column values will be NULL or the default.\n> If column names are not specified while creating or altering a publication,\n> all the columns are replicated as per current behaviour.\n>\n> The proposal for syntax to add table with column names to publication is as follows:\n> Create publication:\n>\n> CREATE PUBLICATION <pub_name> [ FOR TABLE [ONLY] table_name [(colname [,…])] | FOR ALL TABLES]\n>\n>\n> Alter publication:\n>\n> ALTER PUBLICATION <pub_name> ADD TABLE [ONLY] table_name [(colname [, ..])]\n>\n>\n> Please find attached a patch that implements the above proposal.\n> While the patch contains basic implementation and tests, several improvements\n> and sanity checks are underway. I will post an updated patch with those changes soon.\n>\n> Kindly let me know your opinion.\n\nThis idea gives more flexibility to the user, +1 for the feature.\n\nRegards,\nVignesh\n\n\n", "msg_date": "Thu, 1 Jul 2021 18:58:58 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hello, here are a few comments on this patch.\n\nThe patch adds a function get_att_num_by_name; but we have a lsyscache.c\nfunction for that purpose, get_attnum. Maybe that one should be used\ninstead?\n\nget_tuple_columns_map() returns a bitmapset of the attnos of the columns\nin the given list, so its name feels wrong. I propose\nget_table_columnset(). 
However, this function is invoked for every\ninsert/update change, so it's going to be far too slow to be usable. I\nthink you need to cache the bitmapset somewhere, so that the function is\nonly called on first use. I didn't look very closely, but it seems that\nstruct RelationSyncEntry may be a good place to cache it.\n\nThe patch adds a new parse node PublicationTable, but doesn't add\ncopyfuncs.c, equalfuncs.c, readfuncs.c, outfuncs.c support for it.\nMaybe try a compile with WRITE_READ_PARSE_PLAN_TREES and/or\nCOPY_PARSE_PLAN_TREES enabled to make sure everything is covered.\n(I didn't verify that this actually catches anything ...)\n\nThe new column in pg_publication_rel is prrel_attr. This name seems at\nodds with existing column names (we don't use underscores in catalog\ncolumn names). Maybe prattrs is good enough? prrelattrs? We tend to\nuse plurals for columns that are arrays.\n\nIt's not super clear to me that strlist_to_textarray() and related\nprocessing will behave sanely when the column names contain weird\ncharacters such as commas or quotes, or just when used with uppercase\ncolumn names. 
Maybe it's worth having tests that try to break such\ncases.\n\nYou seem to have left a debugging \"elog(LOG)\" line in OpenTableList.\n\nI got warnings from \"git am\" about trailing whitespace being added by\nthe patch in two places.\n\n\nThanks!\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Tue, 6 Jul 2021 19:42:51 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi, I was wondering if/when a subset of cols is specified then does\nthat mean it will be possible for the table to be replicated to a\n*smaller* table at the subscriber side?\n\ne.g Can a table with 7 cols replicated to a table with 2 cols?\n\ntable tab1(a,b,c,d,e,f,g) --> CREATE PUBLICATION pub1 FOR TABLE\ntab1(a,b) --> table tab1(a,b)\n\n~~\n\nI thought maybe that should be possible, but the expected behaviour\nfor that scenario was not very clear to me from the thread/patch\ncomments. And the new TAP test uses the tab1 table created exactly the\nsame for pub/sub, so I couldn't tell from the test code either.\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia\n\n\n", "msg_date": "Thu, 8 Jul 2021 13:27:27 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi Peter,\n\nHi, I was wondering if/when a subset of cols is specified then does\n> that mean it will be possible for the table to be replicated to a\n> *smaller* table at the subscriber side?\n>\ne.g Can a table with 7 cols replicated to a table with 2 cols?\n>\n> table tab1(a,b,c,d,e,f,g) --> CREATE PUBLICATION pub1 FOR TABLE\n> tab1(a,b) --> table tab1(a,b)\n>\n> ~~\n\n\n> I thought maybe that should be possible, but the expected behaviour\n> for that scenario was not very clear to me from the thread/patch\n> comments. 
And the new TAP test uses the tab1 table created exactly the\n> same for pub/sub, so I couldn't tell from the test code either.\n>\n\nCurrently, this capability is not included in the patch. If the table on\nthe subscriber\nserver has lesser attributes than that on the publisher server, it throws\nan error at the\ntime of CREATE SUBSCRIPTION.\n\nAbout having such a functionality, I don't immediately see any issue with\nit as long\nas we make sure replica identity columns are always present on both\ninstances.\nHowever, need to carefully consider situations in which a server subscribes\nto multiple\npublications, each publishing a different subset of columns of a table.\n\n\nThank you,\nRahila Syed\n\nHi Peter,Hi, I was wondering if/when a subset of cols is specified then does\nthat mean it will be possible for the table to be replicated to a\n*smaller* table at the subscriber side? \ne.g Can a table with 7 cols replicated to a table with 2 cols?\n\ntable tab1(a,b,c,d,e,f,g) --> CREATE PUBLICATION pub1 FOR TABLE\ntab1(a,b)  --> table tab1(a,b)\n\n~~\n\nI thought maybe that should be possible, but the expected behaviour\nfor that scenario was not very clear to me from the thread/patch\ncomments. And the new TAP test uses the tab1 table created exactly the\nsame for pub/sub, so I couldn't tell from the test code either. Currently, this capability is not included in the patch. If the table on the subscriberserver has lesser attributes than that on the publisher server, it throws an error at the time of CREATE SUBSCRIPTION.About having such a functionality, I don't immediately see any issue with it as longas we make sure replica identity columns are always present on both instances.However, need to carefully consider situations in which a server subscribes to multiple publications,  each publishing a different subset of columns of a table.   
Thank you,Rahila Syed", "msg_date": "Mon, 12 Jul 2021 14:02:26 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi Alvaro,\n\nThank you for comments.\n\nThe patch adds a function get_att_num_by_name; but we have a lsyscache.c\n> function for that purpose, get_attnum. Maybe that one should be used\n> instead?\n>\n> Thank you for pointing that out, I agree it makes sense to reuse the\nexisting function.\nChanged it accordingly in the attached patch.\n\n\n> get_tuple_columns_map() returns a bitmapset of the attnos of the columns\n> in the given list, so its name feels wrong. I propose\n> get_table_columnset(). However, this function is invoked for every\n> insert/update change, so it's going to be far too slow to be usable. I\n> think you need to cache the bitmapset somewhere, so that the function is\n> only called on first use. I didn't look very closely, but it seems that\n> struct RelationSyncEntry may be a good place to cache it.\n>\n> Makes sense, changed accordingly.\n\n\n> The patch adds a new parse node PublicationTable, but doesn't add\n> copyfuncs.c, equalfuncs.c, readfuncs.c, outfuncs.c support for it.\n> Maybe try a compile with WRITE_READ_PARSE_PLAN_TREES and/or\n> COPY_PARSE_PLAN_TREES enabled to make sure everything is covered.\n> (I didn't verify that this actually catches anything ...)\n>\n\nI will test this and include these changes in the next version.\n\n\n> The new column in pg_publication_rel is prrel_attr. This name seems at\n> odds with existing column names (we don't use underscores in catalog\n> column names). Maybe prattrs is good enough? prrelattrs? 
We tend to\n> use plurals for columns that are arrays.\n>\n> Renamed it to prattrs as per suggestion.\n\nIt's not super clear to me that strlist_to_textarray() and related\n> processing will behave sanely when the column names contain weird\n> characters such as commas or quotes, or just when used with uppercase\n> column names. Maybe it's worth having tests that try to break such\n> cases.\n>\n> Sure, I will include these tests in the next version.\n\n\n> You seem to have left a debugging \"elog(LOG)\" line in OpenTableList.\n>\n> Removed.\n\n\n> I got warnings from \"git am\" about trailing whitespace being added by\n> the patch in two places.\n>\n> Should be fixed now.\n\nThank you,\nRahila Syed", "msg_date": "Mon, 12 Jul 2021 15:08:23 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 7/12/21 10:32 AM, Rahila Syed wrote:\n> Hi Peter,\n> \n> Hi, I was wondering if/when a subset of cols is specified then does\n> that mean it will be possible for the table to be replicated to a\n> *smaller* table at the subscriber side? \n> \n> e.g Can a table with 7 cols replicated to a table with 2 cols?\n> \n> table tab1(a,b,c,d,e,f,g) --> CREATE PUBLICATION pub1 FOR TABLE\n> tab1(a,b)  --> table tab1(a,b)\n> \n> ~~\n> \n> \n> I thought maybe that should be possible, but the expected behaviour\n> for that scenario was not very clear to me from the thread/patch\n> comments. And the new TAP test uses the tab1 table created exactly the\n> same for pub/sub, so I couldn't tell from the test code either.\n> \n>  \n> Currently, this capability is not included in the patch. If the table on\n> the subscriber\n> server has lesser attributes than that on the publisher server, it\n> throws an error at the \n> time of CREATE SUBSCRIPTION.\n> \n\nThat's a bit surprising, to be honest. 
I do understand the patch simply\ntreats the filtered columns as \"unchanged\" because that's the simplest\nway to filter the *data* of the columns. But if someone told me we can\n\"filter columns\" I'd expect this to work without the columns on the\nsubscriber.\n\n> About having such a functionality, I don't immediately see any issue\n> with it as long\n> as we make sure replica identity columns are always present on both\n> instances.\n\nYeah, that seems like an inherent requirement.\n\n> However, need to carefully consider situations in which a server\n> subscribes to multiple \n> publications,  each publishing a different subset of columns of a table.  \n>  \n\nIsn't that pretty much the same situation as for multiple subscriptions\neach with a different set of I/U/D operations? IIRC we simply merge\nthose, so why not to do the same thing here and merge the attributes?\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 12 Jul 2021 14:42:55 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 7/12/21 11:38 AM, Rahila Syed wrote:\n> Hi Alvaro,\n> \n> Thank you for comments.\n> \n> The patch adds a function get_att_num_by_name; but we have a lsyscache.c\n> function for that purpose, get_attnum.  Maybe that one should be used\n> instead?\n> \n> Thank you for pointing that out, I agree it makes sense to reuse the\n> existing function.\n> Changed it accordingly in the attached patch.\n>  \n> \n> get_tuple_columns_map() returns a bitmapset of the attnos of the columns\n> in the given list, so its name feels wrong.  I propose\n> get_table_columnset().  However, this function is invoked for every\n> insert/update change, so it's going to be far too slow to be usable.  
I\n> think you need to cache the bitmapset somewhere, so that the function is\n> only called on first use.  I didn't look very closely, but it seems that\n> struct RelationSyncEntry may be a good place to cache it.\n> \n> Makes sense, changed accordingly.\n>  \n\nTo nitpick, I find \"Bitmapset *att_list\" a bit annoying, because it's\nnot really a list ;-)\n\n\nFWIW \"make check\" fails for me with this version, due to segfault in\nOpenTableLists. Apparenly there's some confusion - the code expects the\nlist to contain PublicationTable nodes, and tries to extract the\nRangeVar from the elements. But the list actually contains RangeVar, so\nthis crashes and burns. See the attached backtrace.\n\nI'd bet this is because the patch uses list of RangeVar in some cases\nand list of PublicationTable in some cases, similarly to the \"row\nfiltering\" patch nearby. IMHO this is just confusing and we should\nalways pass list of PublicationTable nodes.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Mon, 12 Jul 2021 14:54:26 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Jul-12, Tomas Vondra wrote:\n\n> FWIW \"make check\" fails for me with this version, due to segfault in\n> OpenTableLists. Apparenly there's some confusion - the code expects the\n> list to contain PublicationTable nodes, and tries to extract the\n> RangeVar from the elements. But the list actually contains RangeVar, so\n> this crashes and burns. See the attached backtrace.\n> \n> I'd bet this is because the patch uses list of RangeVar in some cases\n> and list of PublicationTable in some cases, similarly to the \"row\n> filtering\" patch nearby. IMHO this is just confusing and we should\n> always pass list of PublicationTable nodes.\n\n+1 don't make the code guess what type of list it is. 
Changing all the\nuses of that node to deal with PublicationTable seems best.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Cuando no hay humildad las personas se degradan\" (A. Christie)\n\n\n", "msg_date": "Mon, 12 Jul 2021 10:53:01 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi Tomas,\n\nThank you for your comments.\n\n\n>\n> >\n> > Currently, this capability is not included in the patch. If the table on\n> > the subscriber\n> > server has lesser attributes than that on the publisher server, it\n> > throws an error at the\n> > time of CREATE SUBSCRIPTION.\n> >\n>\n> That's a bit surprising, to be honest. I do understand the patch simply\n> treats the filtered columns as \"unchanged\" because that's the simplest\n> way to filter the *data* of the columns. But if someone told me we can\n> \"filter columns\" I'd expect this to work without the columns on the\n> subscriber.\n>\n> OK, I will look into adding this.\n\n\n>\n> > However, need to carefully consider situations in which a server\n> > subscribes to multiple\n> > publications, each publishing a different subset of columns of a table.\n\nIsn't that pretty much the same situation as for multiple subscriptions\n> each with a different set of I/U/D operations? IIRC we simply merge\n> those, so why not to do the same thing here and merge the attributes?\n>\n>\nYeah, I agree with the solution to merge the attributes, similar to how\noperations are merged. My concern was also from an implementation point\nof view, will it be a very drastic change. 
I now had a look at how remote\nrelation\nattributes are acquired for comparison with local attributes at the\nsubscriber.\nIt seems that the publisher will need to send the information about the\nfiltered columns\nfor each publication specified during CREATE SUBSCRIPTION.\nThis will be read at the subscriber side which in turn updates its cache\naccordingly.\nCurrently, the subscriber expects all attributes of a published relation to\nbe present.\nI will add code for this in the next version of the patch.\n\n To nitpick, I find \"Bitmapset *att_list\" a bit annoying, because it's\n\nnot really a list ;-)\n\n\nI will make this change with the next version\n\n\n\n> FWIW \"make check\" fails for me with this version, due to segfault in\n> OpenTableLists. Apparenly there's some confusion - the code expects the\n> list to contain PublicationTable nodes, and tries to extract the\n> RangeVar from the elements. But the list actually contains RangeVar, so\n> this crashes and burns. See the attached backtrace.\n>\n>\nThank you for the report, This is fixed in the attached version, now all\npublication\nfunction calls accept the PublicationTableInfo list.\n\nThank you,\nRahila Syed", "msg_date": "Tue, 13 Jul 2021 20:13:44 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Jul 13, 2021 at 7:44 PM Rahila Syed <rahilasyed90@gmail.com> wrote:\n\n>\n> Hi Tomas,\n>\n> Thank you for your comments.\n>\n>\n>>\n>> >\n>> > Currently, this capability is not included in the patch. If the table on\n>> > the subscriber\n>> > server has lesser attributes than that on the publisher server, it\n>> > throws an error at the\n>> > time of CREATE SUBSCRIPTION.\n>> >\n>>\n>> That's a bit surprising, to be honest. I do understand the patch simply\n>> treats the filtered columns as \"unchanged\" because that's the simplest\n>> way to filter the *data* of the columns. 
But if someone told me we can\n>> \"filter columns\" I'd expect this to work without the columns on the\n>> subscriber.\n>>\n>> OK, I will look into adding this.\n>\n>\n>>\n>> > However, need to carefully consider situations in which a server\n>> > subscribes to multiple\n>> > publications, each publishing a different subset of columns of a\n>> table.\n>\n> Isn't that pretty much the same situation as for multiple subscriptions\n>> each with a different set of I/U/D operations? IIRC we simply merge\n>> those, so why not to do the same thing here and merge the attributes?\n>>\n>>\n> Yeah, I agree with the solution to merge the attributes, similar to how\n> operations are merged. My concern was also from an implementation point\n> of view, will it be a very drastic change. I now had a look at how remote\n> relation\n> attributes are acquired for comparison with local attributes at the\n> subscriber.\n> It seems that the publisher will need to send the information about the\n> filtered columns\n> for each publication specified during CREATE SUBSCRIPTION.\n> This will be read at the subscriber side which in turn updates its cache\n> accordingly.\n> Currently, the subscriber expects all attributes of a published relation\n> to be present.\n> I will add code for this in the next version of the patch.\n>\n> To nitpick, I find \"Bitmapset *att_list\" a bit annoying, because it's\n>\n> not really a list ;-)\n>\n>\n> I will make this change with the next version\n>\n>\n>\n>> FWIW \"make check\" fails for me with this version, due to segfault in\n>> OpenTableLists. Apparenly there's some confusion - the code expects the\n>> list to contain PublicationTable nodes, and tries to extract the\n>> RangeVar from the elements. But the list actually contains RangeVar, so\n>> this crashes and burns. 
See the attached backtrace.\n>>\n>>\n> Thank you for the report, This is fixed in the attached version, now all\n> publication\n> function calls accept the PublicationTableInfo list.\n>\n> Thank you,\n> Rahila Syed\n>\n>\n>\n\nThe patch does not apply, and an rebase is required\n\nHunk #8 succeeded at 1259 (offset 99 lines).\nHunk #9 succeeded at 1360 (offset 99 lines).\n1 out of 9 hunks FAILED -- saving rejects to file\nsrc/backend/replication/pgoutput/pgoutput.c.rej\npatching file src/include/catalog/pg_publication.h\n\n\nChanging the status to \"Waiting on Author\"\n\n-- \nIbrar Ahmed\n\nOn Tue, Jul 13, 2021 at 7:44 PM Rahila Syed <rahilasyed90@gmail.com> wrote:Hi Tomas,Thank you for your comments. \n>  \n> Currently, this capability is not included in the patch. If the table on\n> the subscriber\n> server has lesser attributes than that on the publisher server, it\n> throws an error at the \n> time of CREATE SUBSCRIPTION.\n> \n\nThat's a bit surprising, to be honest. I do understand the patch simply\ntreats the filtered columns as \"unchanged\" because that's the simplest\nway to filter the *data* of the columns. But if someone told me we can\n\"filter columns\" I'd expect this to work without the columns on the\nsubscriber.\nOK, I will look into adding this.  \n\n> However, need to carefully consider situations in which a server\n> subscribes to multiple \n> publications,  each publishing a different subset of columns of a table. Isn't that pretty much the same situation as for multiple subscriptions\neach with a different set of I/U/D operations? IIRC we simply merge\nthose, so why not to do the same thing here and merge the attributes?\n Yeah, I agree with the solution to merge the attributes, similar to how operations are merged. My concern was also from an implementation point of view, will it be a very drastic change. I now had a look at how remote relationattributes are acquired for comparison with local attributes at the subscriber. 
It seems that the publisher will need to send the information about the filtered columns for each publication specified during CREATE SUBSCRIPTION.This will be read at the subscriber side which in turn updates its cache accordingly. Currently, the subscriber expects all attributes of a published relation to be present. I will add code for this in the next version of the patch. To nitpick, I find \"Bitmapset *att_list\" a bit annoying, because it's  not really a list ;-)  I will make this change with the next version   FWIW \"make check\" fails for me with this version, due to segfault inOpenTableLists. Apparenly there's some confusion - the code expects thelist to contain PublicationTable nodes, and tries to extract theRangeVar from the elements. But the list actually contains RangeVar, sothis crashes and burns. See the attached backtrace. Thank you for the report, This is fixed in the attached version, now all publicationfunction calls accept the PublicationTableInfo list.Thank you,Rahila Syed    \nThe patch does not apply, and an rebase is requiredHunk #8 succeeded at 1259 (offset 99 lines).\nHunk #9 succeeded at 1360 (offset 99 lines).\n1 out of 9 hunks FAILED -- saving rejects to file src/backend/replication/pgoutput/pgoutput.c.rej\npatching file src/include/catalog/pg_publication.h\nChanging the status to \"Waiting on Author\"-- Ibrar Ahmed", "msg_date": "Mon, 19 Jul 2021 15:20:32 +0500", "msg_from": "Ibrar Ahmed <ibrar.ahmad@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hello,\n\nI think this looks good regarding the PublicationRelationInfo API that was\ndiscussed.\n\nLooking at OpenTableList(), I think you forgot to update the comment --\nit says \"open relations specified by a RangeVar list\", but the list is\nnow of PublicationTable. 
Also I think it would be good to say that the\nreturned tables are PublicationRelationInfo, maybe such as \"In the\nreturned list of PublicationRelationInfo, the tables are locked ...\"\n\nIn AlterPublicationTables() I was confused by some code that seemed\ncommented a bit too verbosely (for a moment I thought the whole list was\nbeing copied into a different format). May I suggest something more\ncompact like\n\n\t\t\t/* Not yet in list; open it and add it to the list */\n\t\t\tif (!found)\n\t\t\t{\n\t\t\t\tRelation\toldrel;\n\t\t\t\tPublicationRelationInfo *pubrel;\n\t\t\t \n\t\t\t\toldrel = table_open(oldrelid, ShareUpdateExclusiveLock);\n\n\t\t\t\t/* Wrap it in PublicationRelationInfo */\n\t\t\t\tpubrel = palloc(sizeof(PublicationRelationInfo));\n\t\t\t\tpubrel->relation = oldrel;\n\t\t\t\tpubrel->relid = oldrelid;\n\t\t\t\tpubrel->columns = NIL;\t\t/* not needed */\n\n\t\t\t\tdelrels = lappend(delrels, pubrel);\n\t\t\t}\n\nThanks!\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Tue, 20 Jul 2021 20:14:17 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "One thing I just happened to notice is this part of your commit message\n\n: REPLICA IDENTITY columns are always replicated\n: irrespective of column names specification.\n\n... for which you don't have any tests -- I mean, create a table with a\ncertain REPLICA IDENTITY and later try to publish a set of columns that\ndoesn't include all the columns in the replica identity, then verify\nthat those columns are indeed published.\n\nHaving said that, I'm not sure I agree with this design decision; what I\nthink this is doing is hiding from the user the fact that they are\npublishing columns that they don't want to publish. 
I think as a user I\nwould rather get an error in that case:\n\n ERROR: invalid column list in published set\n DETAIL: The set of published commands does not include all the replica identity columns.\n\nor something like that. Avoid possible nasty surprises of security-\nleaking nature.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"On the other flipper, one wrong move and we're Fatal Exceptions\"\n(T.U.X.: Term Unit X - http://www.thelinuxreview.com/TUX/)\n\n\n", "msg_date": "Thu, 22 Jul 2021 14:48:32 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\n\n\n\n> >\n>> > Currently, this capability is not included in the patch. If the table on\n>> > the subscriber\n>> > server has lesser attributes than that on the publisher server, it\n>> > throws an error at the\n>> > time of CREATE SUBSCRIPTION.\n>> >\n>>\n>> That's a bit surprising, to be honest. I do understand the patch simply\n>> treats the filtered columns as \"unchanged\" because that's the simplest\n>> way to filter the *data* of the columns. But if someone told me we can\n>> \"filter columns\" I'd expect this to work without the columns on the\n>> subscriber.\n>>\n>> OK, I will look into adding this.\n>\n\nThis has been added in the attached patch. Now, instead of\ntreating the filtered columns as unchanged and sending a byte\nwith that information, unfiltered columns are not sent to the subscriber\nserver at all. This along with saving the network bandwidth, allows\nthe logical replication to even work between tables with different numbers\nof\ncolumns i.e with the table on subscriber server containing only the\nfiltered\ncolumns. 
Currently, replica identity columns are replicated irrespective of\nthe presence of the column filters, hence the table on the subscriber side\nmust\ncontain the replica identity columns.\n\nThe patch adds a new parse node PublicationTable, but doesn't add\n> copyfuncs.c, equalfuncs.c, readfuncs.c, outfuncs.c support for it.\n\n\nThanks, added this.\n\n\n> Looking at OpenTableList(), I think you forgot to update the comment --\n> it says \"open relations specified by a RangeVar list\",\n\n\nThank you for the review, Modified this.\n\nTo nitpick, I find \"Bitmapset *att_list\" a bit annoying, because it's\n> not really a list ;-)\n\n\nChanged this.\n\n>\n> It's not super clear to me that strlist_to_textarray() and related\n> processing will behave sanely when the column names contain weird\n> characters such as commas or quotes, or just when used with uppercase\n> column names. Maybe it's worth having tests that try to break such\n> cases.\n\n\nAdded a few test cases for this.\n\nIn AlterPublicationTables() I was confused by some code that seemed\n> commented a bit too verbosely\n\n\nModified this as per the suggestion.\n\n: REPLICA IDENTITY columns are always replicated\n> : irrespective of column names specification.\n\n\n... for which you don't have any tests\n\n\nI have added these tests.\n\nHaving said that, I'm not sure I agree with this design decision; what I\n> think this is doing is hiding from the user the fact that they are\n> publishing columns that they don't want to publish. I think as a user I\n> would rather get an error in that case:\n\n\n ERROR: invalid column list in published set\n> DETAIL: The set of published commands does not include all the replica\n> identity columns.\n\n\nor something like that. Avoid possible nasty surprises of security-\n> leaking nature.\n\n\nOk, Thank you for your opinion. 
I agree that giving an explicit error in\nthis case will be safer.\nI will include this, in case there are no counter views.\n\nThank you for your review comments. Please find attached the rebased and\nupdated patch.\n\n\nThank you,\nRahila Syed", "msg_date": "Mon, 9 Aug 2021 01:29:50 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Aug 9, 2021 at 1:36 AM Rahila Syed <rahilasyed90@gmail.com> wrote:\n>\n>> Having said that, I'm not sure I agree with this design decision; what I\n>> think this is doing is hiding from the user the fact that they are\n>> publishing columns that they don't want to publish. I think as a user I\n>> would rather get an error in that case:\n>\n>\n>> ERROR: invalid column list in published set\n>> DETAIL: The set of published commands does not include all the replica identity columns.\n>\n>\n>> or something like that. Avoid possible nasty surprises of security-\n>> leaking nature.\n>\n>\n> Ok, Thank you for your opinion. I agree that giving an explicit error in this case will be safer.\n>\n\n+1 for an explicit error in this case.\n\nCan you please explain why you have the restriction for including\nreplica identity columns and do we want to put a similar restriction\nfor the primary key? As far as I understand, if we allow default\nvalues on subscribers for replica identity, then probably updates,\ndeletes won't work as they need to use replica identity (or PK) to\nsearch the required tuple. If so, shouldn't we add this restriction\nonly when a publication has been defined for one of these (Update,\nDelete) actions?\n\nAnother point is what if someone drops the column used in one of the\npublications? Do we want to drop the entire relation from publication\nor just remove the column filter or something else?\n\nDo we want to consider that the columns specified in the filter must\nnot have NOT NULL constraint? 
Because, otherwise, the subscriber will\nerror out inserting such rows?\n\nMinor comments:\n================\n pq_sendbyte(out, flags);\n-\n /* attribute name */\n pq_sendstring(out, NameStr(att->attname));\n\n@@ -953,6 +1000,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)\n\n /* attribute mode */\n pq_sendint32(out, att->atttypmod);\n+\n }\n\n bms_free(idattrs);\ndiff --git a/src/backend/replication/logical/relation.c\nb/src/backend/replication/logical/relation.c\nindex c37e2a7e29..d7a7b00841 100644\n--- a/src/backend/replication/logical/relation.c\n+++ b/src/backend/replication/logical/relation.c\n@@ -354,7 +354,6 @@ logicalrep_rel_open(LogicalRepRelId remoteid,\nLOCKMODE lockmode)\n\n attnum = logicalrep_rel_att_by_name(remoterel,\n NameStr(attr->attname));\n-\n entry->attrmap->attnums[i] = attnum;\n\nThere are quite a few places in the patch that contains spurious line\nadditions or removals.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 9 Aug 2021 15:59:16 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Aug 9, 2021 at 3:59 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Mon, Aug 9, 2021 at 1:36 AM Rahila Syed <rahilasyed90@gmail.com> wrote:\n> >\n> >> Having said that, I'm not sure I agree with this design decision; what I\n> >> think this is doing is hiding from the user the fact that they are\n> >> publishing columns that they don't want to publish. I think as a user I\n> >> would rather get an error in that case:\n> >\n> >\n> >> ERROR: invalid column list in published set\n> >> DETAIL: The set of published commands does not include all the replica identity columns.\n> >\n> >\n> >> or something like that. Avoid possible nasty surprises of security-\n> >> leaking nature.\n> >\n> >\n> > Ok, Thank you for your opinion. 
I agree that giving an explicit error in this case will be safer.\n> >\n>\n> +1 for an explicit error in this case.\n>\n> Can you please explain why you have the restriction for including\n> replica identity columns and do we want to put a similar restriction\n> for the primary key? As far as I understand, if we allow default\n> values on subscribers for replica identity, then probably updates,\n> deletes won't work as they need to use replica identity (or PK) to\n> search the required tuple. If so, shouldn't we add this restriction\n> only when a publication has been defined for one of these (Update,\n> Delete) actions?\n>\n> Another point is what if someone drops the column used in one of the\n> publications? Do we want to drop the entire relation from publication\n> or just remove the column filter or something else?\n>\n> Do we want to consider that the columns specified in the filter must\n> not have NOT NULL constraint? Because, otherwise, the subscriber will\n> error out inserting such rows?\n>\n\nI noticed that other databases provide this feature [1] and they allow\nusers to specify \"Columns that are included in Filter\" or specify \"All\ncolumns to be included in filter except for a subset of columns\". I am\nnot sure if want to provide both ways in the first version but at\nleast we should consider it as a future extensibility requirement and\ntry to choose syntax accordingly.\n\n[1] - https://docs.oracle.com/en/cloud/paas/goldengate-cloud/gwuad/selecting-columns.html#GUID-9A851C8B-48F7-43DF-8D98-D086BE069E20\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 9 Aug 2021 16:15:32 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi Amit,\n\nThanks for your review.\n\n\n> Can you please explain why you have the restriction for including\n> replica identity columns and do we want to put a similar restriction\n> for the primary key? 
As far as I understand, if we allow default\n> values on subscribers for replica identity, then probably updates,\n> deletes won't work as they need to use replica identity (or PK) to\n> search the required tuple. If so, shouldn't we add this restriction\n> only when a publication has been defined for one of these (Update,\n> Delete) actions?\n>\n\nYes, like you mentioned they are needed for Updates and Deletes to work.\nThe restriction for including replica identity columns in column filters\nexists because\nIn case the replica identity column values did not change, the old row\nreplica identity columns\nare not sent to the subscriber, thus we would need new replica identity\ncolumns\nto be sent to identify the row that is to be Updated or Deleted.\nI haven't tested if it would break Insert as well though. I will update\nthe patch accordingly.\n\n\n> Another point is what if someone drops the column used in one of the\n> publications? Do we want to drop the entire relation from publication\n> or just remove the column filter or something else?\n>\n>\nThanks for pointing this out. Currently, this is not handled in the patch.\nI think dropping the column from the filter would make sense on the lines\nof the table being dropped from publication, in case of drop table.\n\n\n> Do we want to consider that the columns specified in the filter must\n> not have NOT NULL constraint? Because, otherwise, the subscriber will\n> error out inserting such rows?\n>\n> I think you mean columns *not* specified in the filter must not have NOT\nNULL constraint\non the subscriber, as this will break during insert, as it will try to\ninsert NULL for columns\nnot sent by the publisher.\nI will look into fixing this. 
Probably this won't be a problem in\ncase the column is auto generated or contains a default value.\n\n\n> Minor comments:\n> ================\n> pq_sendbyte(out, flags);\n> -\n> /* attribute name */\n> pq_sendstring(out, NameStr(att->attname));\n>\n> @@ -953,6 +1000,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)\n>\n> /* attribute mode */\n> pq_sendint32(out, att->atttypmod);\n> +\n> }\n>\n> bms_free(idattrs);\n> diff --git a/src/backend/replication/logical/relation.c\n> b/src/backend/replication/logical/relation.c\n> index c37e2a7e29..d7a7b00841 100644\n> --- a/src/backend/replication/logical/relation.c\n> +++ b/src/backend/replication/logical/relation.c\n> @@ -354,7 +354,6 @@ logicalrep_rel_open(LogicalRepRelId remoteid,\n> LOCKMODE lockmode)\n>\n> attnum = logicalrep_rel_att_by_name(remoterel,\n> NameStr(attr->attname));\n> -\n> entry->attrmap->attnums[i] = attnum;\n>\n> There are quite a few places in the patch that contains spurious line\n> additions or removals.\n>\n>\nThank you for your comments, I will fix these.\n\nThank you,\nRahila Syed\n\nHi Amit,Thanks for your review.\n\nCan you please explain why you have the restriction for including\nreplica identity columns and do we want to put a similar restriction\nfor the primary key? As far as I understand, if we allow default\nvalues on subscribers for replica identity, then probably updates,\ndeletes won't work as they need to use replica identity (or PK) to\nsearch the required tuple. If so, shouldn't we add this restriction\nonly when a publication has been defined for one of these (Update,\nDelete) actions? 
Yes, like you mentioned they are needed for Updates and Deletes to work.The restriction for including replica identity columns in column filters exists becauseIn case the replica identity column values did not change, the old row replica identity columns are not sent to the subscriber, thus we would need new replica identity columns to be sent to identify the row that is to be Updated or Deleted. I haven't tested if it would break Insert as well  though. I will update the patch accordingly.\n\nAnother point is what if someone drops the column used in one of the\npublications? Do we want to drop the entire relation from publication\nor just remove the column filter or something else?\nThanks for pointing this out. Currently, this is not handled in the patch.I think dropping the column from the filter would make sense on the linesof the table being dropped from publication, in case of drop table. \nDo we want to consider that the columns specified in the filter must\nnot have NOT NULL constraint? Because, otherwise, the subscriber will\nerror out inserting such rows?\nI think you mean columns *not* specified in the filter must not have NOT NULL constrainton the subscriber, as this will break during insert, as it will try to insert NULL for columnsnot sent by the publisher. I will look into fixing this. Probably this won't be a problem incase the column is auto generated or contains a default value. 
\nMinor comments:\n================\n  pq_sendbyte(out, flags);\n-\n  /* attribute name */\n  pq_sendstring(out, NameStr(att->attname));\n\n@@ -953,6 +1000,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)\n\n  /* attribute mode */\n  pq_sendint32(out, att->atttypmod);\n+\n  }\n\n  bms_free(idattrs);\ndiff --git a/src/backend/replication/logical/relation.c\nb/src/backend/replication/logical/relation.c\nindex c37e2a7e29..d7a7b00841 100644\n--- a/src/backend/replication/logical/relation.c\n+++ b/src/backend/replication/logical/relation.c\n@@ -354,7 +354,6 @@ logicalrep_rel_open(LogicalRepRelId remoteid,\nLOCKMODE lockmode)\n\n  attnum = logicalrep_rel_att_by_name(remoterel,\n  NameStr(attr->attname));\n-\n  entry->attrmap->attnums[i] = attnum;\n\nThere are quite a few places in the patch that contains spurious line\nadditions or removals.\nThank you for your comments, I will fix these. Thank you,Rahila Syed", "msg_date": "Thu, 12 Aug 2021 08:40:00 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Aug 12, 2021 at 8:40 AM Rahila Syed <rahilasyed90@gmail.com> wrote:\n>\n>>\n>> Can you please explain why you have the restriction for including\n>> replica identity columns and do we want to put a similar restriction\n>> for the primary key? As far as I understand, if we allow default\n>> values on subscribers for replica identity, then probably updates,\n>> deletes won't work as they need to use replica identity (or PK) to\n>> search the required tuple. 
If so, shouldn't we add this restriction\n>> only when a publication has been defined for one of these (Update,\n>> Delete) actions?\n>\n>\n> Yes, like you mentioned they are needed for Updates and Deletes to work.\n> The restriction for including replica identity columns in column filters exists because\n> In case the replica identity column values did not change, the old row replica identity columns\n> are not sent to the subscriber, thus we would need new replica identity columns\n> to be sent to identify the row that is to be Updated or Deleted.\n> I haven't tested if it would break Insert as well though. I will update the patch accordingly.\n>\n\nOkay, but then we also need to ensure that the user shouldn't be\nallowed to enable the 'update' or 'delete' for a publication that\ncontains some filter that doesn't have replica identity columns.\n\n>>\n>> Another point is what if someone drops the column used in one of the\n>> publications? Do we want to drop the entire relation from publication\n>> or just remove the column filter or something else?\n>>\n>\n> Thanks for pointing this out. Currently, this is not handled in the patch.\n> I think dropping the column from the filter would make sense on the lines\n> of the table being dropped from publication, in case of drop table.\n>\n\nI think it would be tricky if you want to remove the column from the\nfilter because you need to recompute the entire filter and update it\nagain. Also, you might need to do this for all the publications that\nhave a particular column in their filter clause. It might be easier to\ndrop the entire filter but you can check if it is easier another way\nthan it is good.\n\n>>\n>> Do we want to consider that the columns specified in the filter must\n>> not have NOT NULL constraint? 
Because, otherwise, the subscriber will\n>> error out inserting such rows?\n>>\n> I think you mean columns *not* specified in the filter must not have NOT NULL constraint\n> on the subscriber, as this will break during insert, as it will try to insert NULL for columns\n> not sent by the publisher.\n>\n\nRight.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 12 Aug 2021 13:59:27 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\n\n\n>> Another point is what if someone drops the column used in one of the\n>> publications? Do we want to drop the entire relation from publication\n>> or just remove the column filter or something else?\n>>\n>\nAfter thinking about this, I think it is best to remove the entire table\nfrom publication,\nif a column specified in the column filter is dropped from the table.\nBecause, if we drop the entire filter without dropping the table, it means\nall the columns will be replicated,\nand the downstream server table might not have those columns.\nIf we drop only the column from the filter we might have to recreate the\nfilter and check for replica identity.\nThat means if the replica identity column is dropped, you can't drop it\nfrom the filter,\nand might have to drop the entire publication-table mapping anyways.\n\nThus, I think it is cleanest to drop the entire relation from publication.\n\nThis has been implemented in the attached version.\n\n\n> Do we want to consider that the columns specified in the filter must\n>> not have NOT NULL constraint? Because, otherwise, the subscriber will\n>> error out inserting such rows?\n>>\n>> I think you mean columns *not* specified in the filter must not have NOT\n> NULL constraint\n> on the subscriber, as this will break during insert, as it will try to\n> insert NULL for columns\n> not sent by the publisher.\n> I will look into fixing this. 
Probably this won't be a problem in\n> case the column is auto generated or contains a default value.\n>\n>\nI am not sure if this needs to be handled. Ideally, we need to prevent the\nsubscriber tables from having a NOT NULL\nconstraint if the publisher uses column filters to publish the values of\nthe table. There is no way\nto do this at the time of creating a table on subscriber.\nAs this involves querying the publisher for this information, it can be\ndone at the time of initial table synchronization.\ni.e error out if any of the subscribed tables has NOT NULL constraint on\nnon-filter columns.\nThis will lead to the user dropping and recreating the subscription after\nremoving the\nNOT NULL constraint from the table.\nI think the same can be achieved by doing nothing and letting the\nsubscriber error out while inserting rows.\n\nMinor comments:\n>> ================\n>> pq_sendbyte(out, flags);\n>> -\n>> /* attribute name */\n>> pq_sendstring(out, NameStr(att->attname));\n>>\n>> @@ -953,6 +1000,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)\n>>\n>> /* attribute mode */\n>> pq_sendint32(out, att->atttypmod);\n>> +\n>> }\n>>\n>> bms_free(idattrs);\n>> diff --git a/src/backend/replication/logical/relation.c\n>> b/src/backend/replication/logical/relation.c\n>> index c37e2a7e29..d7a7b00841 100644\n>> --- a/src/backend/replication/logical/relation.c\n>> +++ b/src/backend/replication/logical/relation.c\n>> @@ -354,7 +354,6 @@ logicalrep_rel_open(LogicalRepRelId remoteid,\n>> LOCKMODE lockmode)\n>>\n>> attnum = logicalrep_rel_att_by_name(remoterel,\n>> NameStr(attr->attname));\n>> -\n>> entry->attrmap->attnums[i] = attnum;\n>>\n>> There are quite a few places in the patch that contains spurious line\n>> additions or removals.\n>>\n>>\n> Fixed these in the attached patch.\n\nHaving said that, I'm not sure I agree with this design decision; what I\n> think this is doing is hiding from the user the fact that they are\n> publishing columns that they don't want 
to publish. I think as a user I\n> would rather get an error in that case:\n\n\n ERROR: invalid column list in published set\n> DETAIL: The set of published commands does not include all the replica\n> identity columns.\n\n\nAdded this.\n\nAlso added some more tests. Please find attached a rebased and updated\npatch.\n\nThank you,\nRahila Syed", "msg_date": "Thu, 2 Sep 2021 02:51:41 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Sep 2, 2021 at 7:21 AM Rahila Syed <rahilasyed90@gmail.com> wrote:\n>\n...\n>\n> Also added some more tests. Please find attached a rebased and updated patch.\n\nI fetched and applied the v4 patch.\n\nIt applied cleanly, and the build and make check was OK.\n\nBut I encountered some errors running the TAP subscription tests, as follows:\n\n...\nt/018_stream_subxact_abort.pl ...... ok\nt/019_stream_subxact_ddl_abort.pl .. ok\nt/020_messages.pl .................. ok\nt/021_column_filter.pl ............. 1/9\n# Failed test 'insert on column c is not replicated'\n# at t/021_column_filter.pl line 126.\n# got: ''\n# expected: '1|abc'\n\n# Failed test 'update on column c is not replicated'\n# at t/021_column_filter.pl line 130.\n# got: ''\n# expected: '1|abc'\n# Looks like you failed 2 tests of 9.\nt/021_column_filter.pl ............. Dubious, test returned 2 (wstat 512, 0x200)\nFailed 2/9 subtests\nt/021_twophase.pl .................. ok\nt/022_twophase_cascade.pl .......... ok\nt/023_twophase_stream.pl ........... ok\nt/024_add_drop_pub.pl .............. ok\nt/100_bugs.pl ...................... 
ok\n\nTest Summary Report\n-------------------\nt/021_column_filter.pl (Wstat: 512 Tests: 9 Failed: 2)\n Failed tests: 6-7\n Non-zero exit status: 2\nFiles=26, Tests=263, 192 wallclock secs ( 0.57 usr 0.09 sys + 110.17\ncusr 25.45 csys = 136.28 CPU)\nResult: FAIL\nmake: *** [check] Error 1\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia\n\n\n", "msg_date": "Thu, 2 Sep 2021 17:53:59 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-02, Rahila Syed wrote:\n\n> After thinking about this, I think it is best to remove the entire table\n> from publication,\n> if a column specified in the column filter is dropped from the table.\n\nHmm, I think it would be cleanest to give responsibility to the user: if\nthe column to be dropped is in the filter, then raise an error, aborting\nthe drop. Then it is up to them to figure out what to do.\n\n\n\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"El destino baraja y nosotros jugamos\" (A. 
Schopenhauer)\n\n\n", "msg_date": "Thu, 2 Sep 2021 04:49:08 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "I think the WITH RECURSIVE query would be easier and more performant by\nusing pg_partition_tree and pg_partition_root.\n\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"Porque Kim no hacía nada, pero, eso sí,\ncon extraordinario éxito\" (\"Kim\", Kipling)\n\n\n", "msg_date": "Thu, 2 Sep 2021 04:56:39 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Sep 2, 2021 at 2:19 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-02, Rahila Syed wrote:\n>\n> > After thinking about this, I think it is best to remove the entire table\n> > from publication,\n> > if a column specified in the column filter is dropped from the table.\n>\n> Hmm, I think it would be cleanest to give responsibility to the user: if\n> the column to be dropped is in the filter, then raise an error, aborting\n> the drop.\n>\n\nDo you think that will make sense if the user used Cascade (Alter\nTable ... Drop Column ... Cascade)?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Sat, 4 Sep 2021 09:41:30 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Sep 2, 2021 at 2:51 AM Rahila Syed <rahilasyed90@gmail.com> wrote:\n>\n>>>\n>>> Do we want to consider that the columns specified in the filter must\n>>> not have NOT NULL constraint? 
Because, otherwise, the subscriber will\n>>> error out inserting such rows?\n>>>\n>> I think you mean columns *not* specified in the filter must not have NOT NULL constraint\n>> on the subscriber, as this will break during insert, as it will try to insert NULL for columns\n>> not sent by the publisher.\n>> I will look into fixing this. Probably this won't be a problem in\n>> case the column is auto generated or contains a default value.\n>>\n>\n> I am not sure if this needs to be handled. Ideally, we need to prevent the subscriber tables from having a NOT NULL\n> constraint if the publisher uses column filters to publish the values of the table. There is no way\n> to do this at the time of creating a table on subscriber.\n>\n> As this involves querying the publisher for this information, it can be done at the time of initial table synchronization.\n> i.e error out if any of the subscribed tables has NOT NULL constraint on non-filter columns.\n> This will lead to the user dropping and recreating the subscription after removing the\n> NOT NULL constraint from the table.\n> I think the same can be achieved by doing nothing and letting the subscriber error out while inserting rows.\n>\n\nThat makes sense and also it is quite possible that users don't have\nsuch columns in the tables on subscribers. I guess we can add such a\nrecommendation in the docs instead of doing anything in the code.\n\nFew comments:\n============\n1.\n+\n+ /*\n+ * Cannot specify column filter when REPLICA IDENTITY IS FULL\n+ * or if column filter does not contain REPLICA IDENITY columns\n+ */\n+ if (targetcols != NIL)\n+ {\n+ if (replidentfull)\n+ ereport(ERROR,\n+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),\n+ errmsg(\"cannot add relation \\\"%s\\\" to publication\",\n+ RelationGetRelationName(targetrel)),\n+ errdetail(\"Cannot have column filter with REPLICA IDENTITY FULL\")));\n\nWhy do we want to have such a restriction for REPLICA IDENTITY FULL? 
I\nthink it is better to expand comments in that regards.\n\n2.\n@@ -839,7 +839,6 @@ NextCopyFrom(CopyFromState cstate, ExprContext *econtext,\n ereport(ERROR,\n (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),\n errmsg(\"extra data after last expected column\")));\n-\n fieldno = 0;\n\n@@ -944,7 +992,6 @@ logicalrep_write_attrs(StringInfo out, Relation rel)\n flags |= LOGICALREP_IS_REPLICA_IDENTITY;\n\n pq_sendbyte(out, flags);\n-\n /* attribute name */\n pq_sendstring(out, NameStr(att->attname));\n\n@@ -953,6 +1000,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel)\n\n /* attribute mode */\n pq_sendint32(out, att->atttypmod);\n+\n }\n\nSpurious line removals and addition.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Sat, 4 Sep 2021 10:12:53 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sat, Sep 4, 2021 at 10:12 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Thu, Sep 2, 2021 at 2:51 AM Rahila Syed <rahilasyed90@gmail.com> wrote:\n> >\n> >>>\n> >>> Do we want to consider that the columns specified in the filter must\n> >>> not have NOT NULL constraint? Because, otherwise, the subscriber will\n> >>> error out inserting such rows?\n> >>>\n> >> I think you mean columns *not* specified in the filter must not have NOT NULL constraint\n> >> on the subscriber, as this will break during insert, as it will try to insert NULL for columns\n> >> not sent by the publisher.\n> >> I will look into fixing this. Probably this won't be a problem in\n> >> case the column is auto generated or contains a default value.\n> >>\n> >\n> > I am not sure if this needs to be handled. Ideally, we need to prevent the subscriber tables from having a NOT NULL\n> > constraint if the publisher uses column filters to publish the values of the table. 
There is no way\n> > to do this at the time of creating a table on subscriber.\n> >\n> > As this involves querying the publisher for this information, it can be done at the time of initial table synchronization.\n> > i.e error out if any of the subscribed tables has NOT NULL constraint on non-filter columns.\n> > This will lead to the user dropping and recreating the subscription after removing the\n> > NOT NULL constraint from the table.\n> > I think the same can be achieved by doing nothing and letting the subscriber error out while inserting rows.\n> >\n>\n> That makes sense and also it is quite possible that users don't have\n> such columns in the tables on subscribers. I guess we can add such a\n> recommendation in the docs instead of doing anything in the code.\n>\n> Few comments:\n> ============\n>\n\nDid you give any thoughts to my earlier suggestion related to syntax [1]?\n\n[1] - https://www.postgresql.org/message-id/CAA4eK1J9b_0_PMnJ2jq9E55bcbmTKdUmy6jPnkf1Zwy2jxah_g%40mail.gmail.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Sat, 4 Sep 2021 10:30:17 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-04, Amit Kapila wrote:\n\n> On Thu, Sep 2, 2021 at 2:19 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> >\n> > On 2021-Sep-02, Rahila Syed wrote:\n> >\n> > > After thinking about this, I think it is best to remove the entire table\n> > > from publication,\n> > > if a column specified in the column filter is dropped from the table.\n> >\n> > Hmm, I think it would be cleanest to give responsibility to the user: if\n> > the column to be dropped is in the filter, then raise an error, aborting\n> > the drop.\n> \n> Do you think that will make sense if the user used Cascade (Alter\n> Table ... Drop Column ... Cascade)?\n\n... ugh. 
Since CASCADE is already defined to be a potentially-data-loss\noperation, then that may be acceptable behavior. For sure the default\nRESTRICT behavior shouldn't do it, though.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Sat, 4 Sep 2021 10:41:04 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sat, Sep 4, 2021 at 8:11 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-04, Amit Kapila wrote:\n>\n> > On Thu, Sep 2, 2021 at 2:19 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > >\n> > > On 2021-Sep-02, Rahila Syed wrote:\n> > >\n> > > > After thinking about this, I think it is best to remove the entire table\n> > > > from publication,\n> > > > if a column specified in the column filter is dropped from the table.\n> > >\n> > > Hmm, I think it would be cleanest to give responsibility to the user: if\n> > > the column to be dropped is in the filter, then raise an error, aborting\n> > > the drop.\n> >\n> > Do you think that will make sense if the user used Cascade (Alter\n> > Table ... Drop Column ... Cascade)?\n>\n> ... ugh. Since CASCADE is already defined to be a potentially-data-loss\n> operation, then that may be acceptable behavior. 
For sure the default\n> RESTRICT behavior shouldn't do it, though.\n>\n\nThat makes sense to me.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 6 Sep 2021 08:53:39 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\n\nOn Mon, Sep 6, 2021 at 8:53 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n\n> On Sat, Sep 4, 2021 at 8:11 PM Alvaro Herrera <alvherre@alvh.no-ip.org>\n> wrote:\n> >\n> > On 2021-Sep-04, Amit Kapila wrote:\n> >\n> > > On Thu, Sep 2, 2021 at 2:19 PM Alvaro Herrera <alvherre@alvh.no-ip.org>\n> wrote:\n> > > >\n> > > > On 2021-Sep-02, Rahila Syed wrote:\n> > > >\n> > > > > After thinking about this, I think it is best to remove the entire\n> table\n> > > > > from publication,\n> > > > > if a column specified in the column filter is dropped from the\n> table.\n> > > >\n> > > > Hmm, I think it would be cleanest to give responsibility to the\n> user: if\n> > > > the column to be dropped is in the filter, then raise an error,\n> aborting\n> > > > the drop.\n> > >\n> > > Do you think that will make sense if the user used Cascade (Alter\n> > > Table ... Drop Column ... Cascade)?\n> >\n> > ... ugh. Since CASCADE is already defined to be a potentially-data-loss\n> > operation, then that may be acceptable behavior. For sure the default\n> > RESTRICT behavior shouldn't do it, though.\n> >\n>\n> That makes sense to me.\n>\n> However, the default (RESTRICT) behaviour of DROP TABLE allows\nremoving the table from the publication. 
I have implemented the removal of\ntable from publication\non drop column (RESTRICT) on the same lines.\n\nAlthough it does make sense to not allow dropping tables from publication,\nin case of RESTRICT.\nIt makes me wonder how DROP TABLE (RESTRICT) allows cascading the drop\ntable to publication.\n\nDid you give any thoughts to my earlier suggestion related to syntax [1]?\n\n\n> [1] -\n> https://www.postgresql.org/message-id/CAA4eK1J9b_0_PMnJ2jq9E55bcbmTKdUmy6jPnkf1Zwy2jxah_g%40mail.gmail.com\n\n\nFor future support to replicate all columns except (x,y,z), I think some\noptional keywords like\nCOLUMNS NOT IN can be inserted between table name and (*columns_list*) as\nfollows.\nALTER PUBLICATION ADD TABLE tab_name [COLUMNS NOT IN] (x,y,z)\nI think this should be possible as a future addition to proposed syntax in\nthe patch.\nPlease let me know your opinion.\n\nThank you,\nRahila Syed\n\nHi, On Mon, Sep 6, 2021 at 8:53 AM Amit Kapila <amit.kapila16@gmail.com> wrote:On Sat, Sep 4, 2021 at 8:11 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-04, Amit Kapila wrote:\n>\n> > On Thu, Sep 2, 2021 at 2:19 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > >\n> > > On 2021-Sep-02, Rahila Syed wrote:\n> > >\n> > > > After thinking about this, I think it is best to remove the entire table\n> > > > from publication,\n> > > > if a column specified in the column filter is dropped from the table.\n> > >\n> > > Hmm, I think it would be cleanest to give responsibility to the user: if\n> > > the column to be dropped is in the filter, then raise an error, aborting\n> > > the drop.\n> >\n> > Do you think that will make sense if the user used Cascade (Alter\n> > Table ... Drop Column ... Cascade)?\n>\n> ... ugh.  Since CASCADE is already defined to be a potentially-data-loss\n> operation, then that may be acceptable behavior.  
For sure the default\n> RESTRICT behavior shouldn't do it, though.\n>\n\nThat makes sense to me.\nHowever, the default (RESTRICT) behaviour of DROP TABLE allowsremoving the table from the publication. I have implemented the removal of table from publicationon drop column (RESTRICT)  on the same lines.Although it does make sense to not allow dropping tables from publication, in case of RESTRICT.It makes me wonder how DROP TABLE (RESTRICT) allows cascading the drop table to publication.Did you give any thoughts to my earlier suggestion related to syntax [1]?[1] - https://www.postgresql.org/message-id/CAA4eK1J9b_0_PMnJ2jq9E55bcbmTKdUmy6jPnkf1Zwy2jxah_g%40mail.gmail.comFor future support to replicate all columns except (x,y,z), I think some optional keywords like COLUMNS NOT IN can be inserted between table name and (*columns_list*) as follows. ALTER PUBLICATION ADD TABLE tab_name [COLUMNS NOT IN] (x,y,z) I think this should be possible as a future addition to proposed syntax in the patch.Please let me know your opinion.Thank you,Rahila Syed", "msg_date": "Mon, 6 Sep 2021 21:25:42 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-06, Rahila Syed wrote:\n\n> > > ... ugh. Since CASCADE is already defined to be a\n> > > potentially-data-loss operation, then that may be acceptable\n> > > behavior. For sure the default RESTRICT behavior shouldn't do it,\n> > > though.\n> >\n> > That makes sense to me.\n>\n> However, the default (RESTRICT) behaviour of DROP TABLE allows\n> removing the table from the publication. I have implemented the\n> removal of table from publication on drop column (RESTRICT) on the\n> same lines.\n\nBut dropping the table is quite a different action from dropping a\ncolumn, isn't it? 
If you drop a table, it seems perfectly reasonable\nthat it has to be removed from the publication -- essentially, when the\nuser drops a table, she is saying \"I don't care about this table\nanymore\". However, if you drop just one column, that doesn't\nnecessarily mean that the user wants to stop publishing the whole table.\nRemoving the table from the publication in ALTER TABLE DROP COLUMN seems\nlike an overreaction. (Except perhaps in the special case were the\ncolumn being dropped is the only one that was being published.)\n\nSo let's discuss what should happen. If you drop a column, and the\ncolumn is filtered out, then it seems to me that the publication should\ncontinue to have the table, and it should continue to filter out the\nother columns that were being filtered out, regardless of CASCADE/RESTRICT.\nHowever, if the column is *included* in the publication, and you drop\nit, ISTM there are two cases:\n\n1. If it's DROP CASCADE, then the list of columns to replicate should\ncontinue to have all columns it previously had, so just remove the\ncolumn that is being dropped.\n\n2. If it's DROP RESTRICT, then an error should be raised so that the\nuser can make a concious decision to remove the column from the filter\nbefore dropping the column.\n\n> Did you give any thoughts to my earlier suggestion related to syntax [1]?\n> \n> [1] https://www.postgresql.org/message-id/CAA4eK1J9b_0_PMnJ2jq9E55bcbmTKdUmy6jPnkf1Zwy2jxah_g%40mail.gmail.com\n\nThis is a great followup idea, after the current feature is committed.\nThere are a few things that have been reported in review comments; let's\nget those addressed before adding more features on top.\n\nI pushed the clerical part of this -- namely the addition of\nPublicationTable node and PublicationRelInfo struct. I attach the part\nof your v4 patch that I didn't include. 
It contains a couple of small\ncorrections, but I didn't do anything invasive (such as pgindent)\nbecause that would perhaps cause you too much merge pain.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/", "msg_date": "Mon, 6 Sep 2021 14:51:52 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "The code in get_rel_sync_entry() changes current memory context to\nCacheMemoryContext, then does a bunch of memory-leaking things. This is\nnot good, because that memory context has to be very carefully managed\nto avoid permanent memory leaks. I suppose you added that because you\nneed something -- probably entry->att_map -- to survive memory context\nresets, but if so then you need to change to CacheMemoryContext only\nwhen that memory is allocated, not other chunks of memory. I suspect\nyou can fix this by moving the MemoryContextSwitchTo() to just before\ncalling get_table_columnset; then all the leaky thinkgs are done in\nwhatever the original memory context is, which is fine.\n\n(However, you also need to make sure that ->att_map is carefully freed\nat the right time. It looks like this already happens in\nrel_sync_cache_relation_cb, but is rel_sync_cache_publication_cb\ncorrect? 
And in get_rel_sync_entry() itself, what if the entry already\nhas att_map -- should it be freed prior to allocating another one?)\n\nBy the way, I notice that your patch doesn't add documentation changes,\nwhich are of course necessary.\n\n\n\n/me is left wondering about PGOutputData->publication_names memory\nhandling ...\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Mon, 6 Sep 2021 16:16:03 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 9/6/21 7:51 PM, Alvaro Herrera wrote:\n> On 2021-Sep-06, Rahila Syed wrote:\n> \n>>>> ... ugh. Since CASCADE is already defined to be a\n>>>> potentially-data-loss operation, then that may be acceptable\n>>>> behavior. For sure the default RESTRICT behavior shouldn't do it,\n>>>> though.\n>>>\n>>> That makes sense to me.\n>>\n>> However, the default (RESTRICT) behaviour of DROP TABLE allows\n>> removing the table from the publication. I have implemented the\n>> removal of table from publication on drop column (RESTRICT) on the\n>> same lines.\n> \n> But dropping the table is quite a different action from dropping a\n> column, isn't it? If you drop a table, it seems perfectly reasonable\n> that it has to be removed from the publication -- essentially, when the\n> user drops a table, she is saying \"I don't care about this table\n> anymore\". However, if you drop just one column, that doesn't\n> necessarily mean that the user wants to stop publishing the whole table.\n> Removing the table from the publication in ALTER TABLE DROP COLUMN seems\n> like an overreaction. (Except perhaps in the special case were the\n> column being dropped is the only one that was being published.)\n> \n> So let's discuss what should happen. 
If you drop a column, and the\n> column is filtered out, then it seems to me that the publication should\n> continue to have the table, and it should continue to filter out the\n> other columns that were being filtered out, regardless of CASCADE/RESTRICT.\n> However, if the column is *included* in the publication, and you drop\n> it, ISTM there are two cases:\n> \n> 1. If it's DROP CASCADE, then the list of columns to replicate should\n> continue to have all columns it previously had, so just remove the\n> column that is being dropped.\n> \n> 2. If it's DROP RESTRICT, then an error should be raised so that the\n> user can make a concious decision to remove the column from the filter\n> before dropping the column.\n> \n\nFWIW I think this is a sensible behavior.\n\nI don't quite see why dropping a column should remove the table from\npublication (assuming there are some columns still replicated).\n\nOf course, it may break the subscriber (e.g. when there was NOT NULL\nconstraint on that column), but DROP RESTRICT (which I assume is the\ndefault mode) prevents that. And if DROP CASCADE is specified, I think\nit's reasonable to require the user to fix the fallout.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 6 Sep 2021 23:03:50 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Here are some v5 review comments for your consideration:\n\n------\n\n1. 
src/backend/access/common/relation.c\n\n@@ -215,3 +217,22 @@ relation_close(Relation relation, LOCKMODE lockmode)\n if (lockmode != NoLock)\n UnlockRelationId(&relid, lockmode);\n }\n+\n+/*\n+ * Return a bitmapset of attributes given the list of column names\n+ */\n+Bitmapset*\n+get_table_columnset(Oid relid, List *columns, Bitmapset *att_map)\n+{\n\n\nIIUC that 3rd parameter (att_map) is always passed as NULL to\nget_table_columnset function because you are constructing this\nBitmapset from scratch. Maybe I am mistaken, but if not then what is\nthe purpose of that att_map parameter?\n\n------\n\n2. src/backend/catalog/pg_publication.c\n\n+ ereport(ERROR,\n+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),\n+ errmsg(\"cannot add relation \\\"%s\\\" to publication\",\n+ RelationGetRelationName(targetrel)),\n+ errdetail(\"Column filter must include REPLICA IDENTITY columns\")));\n\nIs ERRCODE_INVALID_COLUMN_REFERENCE a more appropriate errcode to use here?\n\n------\n\n3. src/backend/catalog/pg_publication.c\n\n+ else\n+ {\n+ Bitmapset *filtermap = NULL;\n+ idattrs = RelationGetIndexAttrBitmap(targetrel,\nINDEX_ATTR_BITMAP_IDENTITY_KEY);\n\nThe RelationGetIndexAttrBitmap function comment says \"should be\nbms_free'd when not needed anymore\" but it seems the patch code is not\nfreeing idattrs when finished using it.\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia\n\n\n", "msg_date": "Tue, 7 Sep 2021 10:29:49 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 6, 2021 at 9:25 PM Rahila Syed <rahilasyed90@gmail.com> wrote:\n>\n> On Mon, Sep 6, 2021 at 8:53 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>\n>\n>> Did you give any thoughts to my earlier suggestion related to syntax [1]?\n>>\n>>\n>> [1] - https://www.postgresql.org/message-id/CAA4eK1J9b_0_PMnJ2jq9E55bcbmTKdUmy6jPnkf1Zwy2jxah_g%40mail.gmail.com\n>\n>\n> For future support to 
replicate all columns except (x,y,z), I think some optional keywords like\n> COLUMNS NOT IN can be inserted between table name and (*columns_list*) as follows.\n> ALTER PUBLICATION ADD TABLE tab_name [COLUMNS NOT IN] (x,y,z)\n> I think this should be possible as a future addition to proposed syntax in the patch.\n> Please let me know your opinion.\n>\n\nRight, I don't want you to implement that feature as part of this\npatch but how about using COLUMNS or similar keyword in column filter\nlike ALTER PUBLICATION ADD TABLE tab_name COLUMNS (c1, c2, ...)? This\ncan make it easier to extend in the future.\n\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 7 Sep 2021 10:30:18 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 6, 2021 at 11:21 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-06, Rahila Syed wrote:\n>\n> > > > ... ugh. Since CASCADE is already defined to be a\n> > > > potentially-data-loss operation, then that may be acceptable\n> > > > behavior. For sure the default RESTRICT behavior shouldn't do it,\n> > > > though.\n> > >\n> > > That makes sense to me.\n> >\n> > However, the default (RESTRICT) behaviour of DROP TABLE allows\n> > removing the table from the publication. I have implemented the\n> > removal of table from publication on drop column (RESTRICT) on the\n> > same lines.\n>\n> But dropping the table is quite a different action from dropping a\n> column, isn't it? If you drop a table, it seems perfectly reasonable\n> that it has to be removed from the publication -- essentially, when the\n> user drops a table, she is saying \"I don't care about this table\n> anymore\". 
However, if you drop just one column, that doesn't\n> necessarily mean that the user wants to stop publishing the whole table.\n> Removing the table from the publication in ALTER TABLE DROP COLUMN seems\n> like an overreaction. (Except perhaps in the special case were the\n> column being dropped is the only one that was being published.)\n>\n> So let's discuss what should happen. If you drop a column, and the\n> column is filtered out, then it seems to me that the publication should\n> continue to have the table, and it should continue to filter out the\n> other columns that were being filtered out, regardless of CASCADE/RESTRICT.\n>\n\nYeah, for this case we don't need to do anything and I am not sure if\nthe patch is dropping tables in this case?\n\n> However, if the column is *included* in the publication, and you drop\n> it, ISTM there are two cases:\n>\n> 1. If it's DROP CASCADE, then the list of columns to replicate should\n> continue to have all columns it previously had, so just remove the\n> column that is being dropped.\n>\n\nNote that for a somewhat similar case in the index (where the index\nhas an expression) we drop the index if one of the columns used in the\nindex expression is dropped, so we might want to just remove the\nentire filter here instead of just removing the particular column or\nremove the entire table from publication as Rahila is proposing.\n\nI think removing just a particular column can break the replication\nfor Updates and Deletes if the removed column is part of replica\nidentity. If the entire filter is removed then also the entire\nreplication can break, so, I think Rahila's proposal is worth\nconsidering.\n\n> 2. If it's DROP RESTRICT, then an error should be raised so that the\n> user can make a concious decision to remove the column from the filter\n> before dropping the column.\n>\n\nI think one can argue for a similar case for index. 
If we are allowing\nthe index to be dropped even with RESTRICT then why not column filter?\n\n> > Did you give any thoughts to my earlier suggestion related to syntax [1]?\n> >\n> > [1] https://www.postgresql.org/message-id/CAA4eK1J9b_0_PMnJ2jq9E55bcbmTKdUmy6jPnkf1Zwy2jxah_g%40mail.gmail.com\n>\n> This is a great followup idea, after the current feature is committed.\n>\n\nAs mentioned in my response to Rahila, I was just thinking of using an\noptional keyword Column for column filter so that we can extend it\nlater.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 7 Sep 2021 11:05:54 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Sep 7, 2021 at 11:06 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n\n> On Mon, Sep 6, 2021 at 11:21 PM Alvaro Herrera <alvherre@alvh.no-ip.org>\n> wrote:\n> >\n> > On 2021-Sep-06, Rahila Syed wrote:\n> >\n> > > > > ... ugh. Since CASCADE is already defined to be a\n> > > > > potentially-data-loss operation, then that may be acceptable\n> > > > > behavior. For sure the default RESTRICT behavior shouldn't do it,\n> > > > > though.\n> > > >\n> > > > That makes sense to me.\n> > >\n> > > However, the default (RESTRICT) behaviour of DROP TABLE allows\n> > > removing the table from the publication. I have implemented the\n> > > removal of table from publication on drop column (RESTRICT) on the\n> > > same lines.\n> >\n> > But dropping the table is quite a different action from dropping a\n> > column, isn't it? If you drop a table, it seems perfectly reasonable\n> > that it has to be removed from the publication -- essentially, when the\n> > user drops a table, she is saying \"I don't care about this table\n> > anymore\". 
However, if you drop just one column, that doesn't\n> > necessarily mean that the user wants to stop publishing the whole table.\n> > Removing the table from the publication in ALTER TABLE DROP COLUMN seems\n> > like an overreaction. (Except perhaps in the special case were the\n> > column being dropped is the only one that was being published.)\n> >\n> > So let's discuss what should happen. If you drop a column, and the\n> > column is filtered out, then it seems to me that the publication should\n> > continue to have the table, and it should continue to filter out the\n> > other columns that were being filtered out, regardless of\n> CASCADE/RESTRICT.\n> >\n>\n> Yeah, for this case we don't need to do anything and I am not sure if\n> the patch is dropping tables in this case?\n>\n> > However, if the column is *included* in the publication, and you drop\n> > it, ISTM there are two cases:\n> >\n> > 1. If it's DROP CASCADE, then the list of columns to replicate should\n> > continue to have all columns it previously had, so just remove the\n> > column that is being dropped.\n> >\n>\n> Note that for a somewhat similar case in the index (where the index\n> has an expression) we drop the index if one of the columns used in the\n> index expression is dropped, so we might want to just remove the\n> entire filter here instead of just removing the particular column or\n> remove the entire table from publication as Rahila is proposing.\n>\n> I think removing just a particular column can break the replication\n> for Updates and Deletes if the removed column is part of replica\n> identity.\n>\n\nBut how this is specific to this patch, I think the behavior should be the\nsame as what is there now, I mean now also we can drop the columns which\nare part of replica identity right.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\nOn Tue, Sep 7, 2021 at 11:06 AM Amit Kapila <amit.kapila16@gmail.com> wrote:On Mon, Sep 6, 2021 at 11:21 PM Alvaro Herrera 
<alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-06, Rahila Syed wrote:\n>\n> > > > ... ugh.  Since CASCADE is already defined to be a\n> > > > potentially-data-loss operation, then that may be acceptable\n> > > > behavior.  For sure the default RESTRICT behavior shouldn't do it,\n> > > > though.\n> > >\n> > > That makes sense to me.\n> >\n> > However, the default (RESTRICT) behaviour of DROP TABLE allows\n> > removing the table from the publication. I have implemented the\n> > removal of table from publication on drop column (RESTRICT)  on the\n> > same lines.\n>\n> But dropping the table is quite a different action from dropping a\n> column, isn't it?  If you drop a table, it seems perfectly reasonable\n> that it has to be removed from the publication -- essentially, when the\n> user drops a table, she is saying \"I don't care about this table\n> anymore\".  However, if you drop just one column, that doesn't\n> necessarily mean that the user wants to stop publishing the whole table.\n> Removing the table from the publication in ALTER TABLE DROP COLUMN seems\n> like an overreaction.  (Except perhaps in the special case were the\n> column being dropped is the only one that was being published.)\n>\n> So let's discuss what should happen.  If you drop a column, and the\n> column is filtered out, then it seems to me that the publication should\n> continue to have the table, and it should continue to filter out the\n> other columns that were being filtered out, regardless of CASCADE/RESTRICT.\n>\n\nYeah, for this case we don't need to do anything and I am not sure if\nthe patch is dropping tables in this case?\n\n> However, if the column is *included* in the publication, and you drop\n> it, ISTM there are two cases:\n>\n> 1. 
If it's DROP CASCADE, then the list of columns to replicate should\n> continue to have all columns it previously had, so just remove the\n> column that is being dropped.\n>\n\nNote that for a somewhat similar case in the index (where the index\nhas an expression) we drop the index if one of the columns used in the\nindex expression is dropped, so we might want to just remove the\nentire filter here instead of just removing the particular column or\nremove the entire table from publication as Rahila is proposing.\n\nI think removing just a particular column can break the replication\nfor Updates and Deletes if the removed column is part of replica\nidentity. But how this is specific to this patch, I think the behavior should be the same as what is there now, I mean now also we can drop the columns which are part of replica identity right.-- Regards,Dilip KumarEnterpriseDB: http://www.enterprisedb.com", "msg_date": "Tue, 7 Sep 2021 11:26:17 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Sep 7, 2021 at 11:26 AM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n>\n> On Tue, Sep 7, 2021 at 11:06 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>\n>> On Mon, Sep 6, 2021 at 11:21 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>> >\n>> > On 2021-Sep-06, Rahila Syed wrote:\n>> >\n>> > > > > ... ugh. Since CASCADE is already defined to be a\n>> > > > > potentially-data-loss operation, then that may be acceptable\n>> > > > > behavior. For sure the default RESTRICT behavior shouldn't do it,\n>> > > > > though.\n>> > > >\n>> > > > That makes sense to me.\n>> > >\n>> > > However, the default (RESTRICT) behaviour of DROP TABLE allows\n>> > > removing the table from the publication. 
I have implemented the\n>> > > removal of table from publication on drop column (RESTRICT) on the\n>> > > same lines.\n>> >\n>> > But dropping the table is quite a different action from dropping a\n>> > column, isn't it? If you drop a table, it seems perfectly reasonable\n>> > that it has to be removed from the publication -- essentially, when the\n>> > user drops a table, she is saying \"I don't care about this table\n>> > anymore\". However, if you drop just one column, that doesn't\n>> > necessarily mean that the user wants to stop publishing the whole table.\n>> > Removing the table from the publication in ALTER TABLE DROP COLUMN seems\n>> > like an overreaction. (Except perhaps in the special case were the\n>> > column being dropped is the only one that was being published.)\n>> >\n>> > So let's discuss what should happen. If you drop a column, and the\n>> > column is filtered out, then it seems to me that the publication should\n>> > continue to have the table, and it should continue to filter out the\n>> > other columns that were being filtered out, regardless of CASCADE/RESTRICT.\n>> >\n>>\n>> Yeah, for this case we don't need to do anything and I am not sure if\n>> the patch is dropping tables in this case?\n>>\n>> > However, if the column is *included* in the publication, and you drop\n>> > it, ISTM there are two cases:\n>> >\n>> > 1. 
If it's DROP CASCADE, then the list of columns to replicate should\n>> > continue to have all columns it previously had, so just remove the\n>> > column that is being dropped.\n>> >\n>>\n>> Note that for a somewhat similar case in the index (where the index\n>> has an expression) we drop the index if one of the columns used in the\n>> index expression is dropped, so we might want to just remove the\n>> entire filter here instead of just removing the particular column or\n>> remove the entire table from publication as Rahila is proposing.\n>>\n>> I think removing just a particular column can break the replication\n>> for Updates and Deletes if the removed column is part of replica\n>> identity.\n>\n>\n> But how this is specific to this patch, I think the behavior should be the same as what is there now, I mean now also we can drop the columns which are part of replica identity right.\n>\n\nSure, but we drop replica identity and corresponding index as well.\nThe patch ensures that replica identity columns must be part of the\ncolumn filter and now that restriction won't hold anymore. I think if\nwe want to retain that restriction then it is better to either remove\nthe entire filter or remove the entire table. Anyway, the main point\nwas that if we can remove the index/replica identity, it seems like\nthere should be the same treatment for column filter.\n\nAnother related point that occurred to me is that if the user changes\nreplica identity then probably we should ensure that the column\nfilters for the table still holds the creteria or maybe we need to\nremove the filter in that case as well. 
I am not sure if the patch is\nalready doing something about it and if not then isn't it better to do\nsomething about it?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 7 Sep 2021 12:13:08 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 6, 2021, at 2:51 PM, Alvaro Herrera wrote:\n> I pushed the clerical part of this -- namely the addition of\n> PublicationTable node and PublicationRelInfo struct. I attach the part\n> of your v4 patch that I didn't include. It contains a couple of small\n> corrections, but I didn't do anything invasive (such as pgindent)\n> because that would perhaps cause you too much merge pain.\nWhile updating the row filter patch [1] (because it also uses these\nstructures), I noticed that you use PublicationRelInfo as a type name instead\nof PublicationRelationInfo. I choose the latter because there is already a data\nstructure named PublicationRelInfo (pg_dump.h). It is a client-side data\nstructure but I doesn't seem a good practice to duplicate data structure names\nover the same code base.\n\n[1] https://www.postgresql.org/message-id/0c2464d4-65f4-4d91-aeb2-c5584c1350f5%40www.fastmail.com\n\n\n--\nEuler Taveira\nEDB https://www.enterprisedb.com/\n\nOn Mon, Sep 6, 2021, at 2:51 PM, Alvaro Herrera wrote:I pushed the clerical part of this -- namely the addition ofPublicationTable node and PublicationRelInfo struct.  I attach the partof your v4 patch that I didn't include.  It contains a couple of smallcorrections, but I didn't do anything invasive (such as pgindent)because that would perhaps cause you too much merge pain.While updating the row filter patch [1] (because it also uses thesestructures), I noticed that you use PublicationRelInfo as a type name insteadof PublicationRelationInfo. I choose the latter because there is already a datastructure named PublicationRelInfo (pg_dump.h). 
It is a client-side datastructure but I doesn't seem a good practice to duplicate data structure namesover the same code base.[1] https://www.postgresql.org/message-id/0c2464d4-65f4-4d91-aeb2-c5584c1350f5%40www.fastmail.com--Euler TaveiraEDB   https://www.enterprisedb.com/", "msg_date": "Tue, 07 Sep 2021 19:39:30 -0300", "msg_from": "\"Euler Taveira\" <euler@eulerto.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 6, 2021 at 11:21 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> I pushed the clerical part of this -- namely the addition of\n> PublicationTable node and PublicationRelInfo struct.\n>\n\nOne point to note here is that we are developing a generic grammar for\npublications where not only tables but other objects like schema,\nsequences, etc. can be specified, see [1]. So, there is some overlap\nin the grammar modifications being made by this patch and the work\nbeing done in that other thread. As both the patches are being\ndeveloped at the same time, it might be better to be in sync,\notherwise, some of the work needs to be changed. I can see that in the\npatch [2] (v28-0002-Added-schema-level-support-for-publication) being\ndeveloped there the changes made by the above commit needs to be\nchanged again to represent a generic object for publication. It is\npossible that we can do it some other way but I think it would be\nbetter to coordinate the work in both threads. 
The other approach is\nto continue independently and the later patch can adapt to the earlier\none which is fine too but it might be more work for the later one.\n\n[1] - https://www.postgresql.org/message-id/877603.1629120678%40sss.pgh.pa.us\n[2] - postgresql.org/message-id/CALDaNm0OudeDeFN7bSWPro0hgKx%3D1zPgcNFWnvU_G6w3mDPX0Q%40mail.gmail.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 15 Sep 2021 09:04:21 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-15, Amit Kapila wrote:\n\n> On Mon, Sep 6, 2021 at 11:21 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> >\n> > I pushed the clerical part of this -- namely the addition of\n> > PublicationTable node and PublicationRelInfo struct.\n> \n> One point to note here is that we are developing a generic grammar for\n> publications where not only tables but other objects like schema,\n> sequences, etc. can be specified, see [1]. So, there is some overlap\n> in the grammar modifications being made by this patch and the work\n> being done in that other thread.\n\nOh rats. I was not aware of that thread, or indeed of the fact that\nadding multiple object types to publications was being considered.\n\nI do see that 0002 there contains gram.y changes, but AFAICS those\nchanges don't allow specifying a column list for a table, so there are\nsome changes needed in that patch for that either way.\n\nI agree that it's better to move forward in unison.\n\nI noticed that 0002 in that other patch uses a void * pointer in\nPublicationObjSpec that \"could be either RangeVar or String\", which\nstrikes me as a really bad idea. 
(Already discussed in some other\nthread recently, maybe this one or the row filtering one.)\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 15 Sep 2021 08:50:07 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Sep 15, 2021 at 5:20 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-15, Amit Kapila wrote:\n>\n> > On Mon, Sep 6, 2021 at 11:21 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > >\n> > > I pushed the clerical part of this -- namely the addition of\n> > > PublicationTable node and PublicationRelInfo struct.\n> >\n> > One point to note here is that we are developing a generic grammar for\n> > publications where not only tables but other objects like schema,\n> > sequences, etc. can be specified, see [1]. So, there is some overlap\n> > in the grammar modifications being made by this patch and the work\n> > being done in that other thread.\n>\n> Oh rats. I was not aware of that thread, or indeed of the fact that\n> adding multiple object types to publications was being considered.\n>\n> I do see that 0002 there contains gram.y changes, but AFAICS those\n> changes don't allow specifying a column list for a table, so there are\n> some changes needed in that patch for that either way.\n>\n> I agree that it's better to move forward in unison.\n>\n> I noticed that 0002 in that other patch uses a void * pointer in\n> PublicationObjSpec that \"could be either RangeVar or String\", which\n> strikes me as a really bad idea. (Already discussed in some other\n> thread recently, maybe this one or the row filtering one.)\n\nI have extracted the parser code and attached it here, so that it will\nbe easy to go through. 
We wanted to support the following syntax as in\n[1]:\nCREATE PUBLICATION pub1 FOR\nTABLE t1,t2,t3, ALL TABLES IN SCHEMA s1,s2,\nSEQUENCE seq1,seq2, ALL SEQUENCES IN SCHEMA s3,s4;\n\nColumns can be added to PublicationObjSpec data structure. The patch\nGeneric_object_type_parser_002_table_schema_publication.patch has the\nchanges that were used to handle the parsing. Schema and Relation both\nare different objects, schema is of string type and relation is of\nRangeVar type. While parsing, schema name is parsed in string format\nand relation is parsed and converted to rangevar type, these objects\nwill be then handled accordingly during post processing. That is the\nreason it used void * type which could hold both RangeVar and String.\nThoughts?\n\n[1] - https://www.postgresql.org/message-id/877603.1629120678%40sss.pgh.pa.us\n\nRegards,\nVignesh", "msg_date": "Wed, 15 Sep 2021 17:49:19 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-15, vignesh C wrote:\n\n> I have extracted the parser code and attached it here, so that it will\n> be easy to go through. We wanted to support the following syntax as in\n> [1]:\n> CREATE PUBLICATION pub1 FOR\n> TABLE t1,t2,t3, ALL TABLES IN SCHEMA s1,s2,\n> SEQUENCE seq1,seq2, ALL SEQUENCES IN SCHEMA s3,s4;\n\nOh, thanks, it looks like this can be useful. We can get the common\ngrammar done and then rebase all the other patches (I was also just told\nabout support for sequences in [1]) on top.\n\n[1] https://postgr.es/m/3d6df331-5532-6848-eb45-344b265e0238@enterprisedb.com\n\n> Columns can be added to PublicationObjSpec data structure.\n\nRight. (As a List of String, I imagine.)\n\n> The patch\n> Generic_object_type_parser_002_table_schema_publication.patch has the\n> changes that were used to handle the parsing. 
Schema and Relation both\n> are different objects, schema is of string type and relation is of\n> RangeVar type. While parsing, schema name is parsed in string format\n> and relation is parsed and converted to rangevar type, these objects\n> will be then handled accordingly during post processing.\n\nYeah, I think it'd be cleaner if the node type has two members, something like\nthis\n\ntypedef struct PublicationObjSpec\n{\n\tNodeTag\t\ttype;\n\tPublicationObjSpecType pubobjtype;\t/* type of this publication object */\n\tRangeVar\t*rv;\t\t/* if a table */\n\tString\t\t*objname;\t/* if a schema */\n\tint\t\tlocation;\t\t/* token location, or -1 if unknown */\n} PublicationObjSpec;\n\nand only one of them is set, the other is NULL, depending on the object type.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Wed, 15 Sep 2021 09:36:51 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Sep 15, 2021, at 9:19 AM, vignesh C wrote:\n> I have extracted the parser code and attached it here, so that it will\n> be easy to go through. We wanted to support the following syntax as in\n> [1]:\n> CREATE PUBLICATION pub1 FOR\n> TABLE t1,t2,t3, ALL TABLES IN SCHEMA s1,s2,\n> SEQUENCE seq1,seq2, ALL SEQUENCES IN SCHEMA s3,s4;\nI don't like this syntax. It seems too much syntax for the same purpose in a\nsingle command. If you look at GRANT command whose ALL TABLES IN SCHEMA syntax\nwas extracted, you can use ON TABLE or ON ALL TABLES IN SCHEMA; you cannot use\nboth. This proposal allows duplicate objects (of course, you can ignore it but\nthe current code prevent duplicates -- see publication_add_relation).\n\nIMO you should mimic the GRANT grammar and have multiple commands for row\nfiltering, column filtering, and ALL FOO IN SCHEMA. The filtering patches only\nuse the FOR TABLE syntax. 
The later won't have filtering syntax. Having said\nthat the grammar should be:\n\nCREATE PUBLICATION name\n [ FOR TABLE [ ONLY ] table_name [ * ] [ (column_name [, ...] ) ] [ WHERE (expression) ] [, ...]\n | FOR ALL TABLES\n | FOR ALL TABLES IN SCHEMA schema_name, [, ...]\n | FOR ALL SEQUENCES IN SCHEMA schema_name, [, ...] ]\n [ WITH ( publication_parameter [= value] [, ... ] ) ]\n\nALTER PUBLICATION name ADD TABLE [ ONLY ] table_name [ * ] [ (column_name [, ...] ) ] [ WHERE (expression) ]\nALTER PUBLICATION name ADD ALL TABLES IN SCHEMA schema_name, [, ...]\nALTER PUBLICATION name ADD ALL SEQUENCES IN SCHEMA schema_name, [, ...]\n\nALTER PUBLICATION name SET TABLE [ ONLY ] table_name [ * ] [ (column_name [, ...] ) ] [ WHERE (expression) ]\nALTER PUBLICATION name SET ALL TABLES IN SCHEMA schema_name, [, ...]\nALTER PUBLICATION name SET ALL SEQUENCES IN SCHEMA schema_name, [, ...]\n\nALTER PUBLICATION name DROP TABLE [ ONLY ] table_name [ * ]\nALTER PUBLICATION name DROP ALL TABLES IN SCHEMA schema_name, [, ...]\nALTER PUBLICATION name DROP ALL SEQUENCES IN SCHEMA schema_name, [, ...]\n\nOpinions?\n\n\n--\nEuler Taveira\nEDB https://www.enterprisedb.com/\n\nOn Wed, Sep 15, 2021, at 9:19 AM, vignesh C wrote:I have extracted the parser code and attached it here, so that it willbe easy to go through. We wanted to support the following syntax as in[1]:CREATE PUBLICATION pub1 FORTABLE t1,t2,t3, ALL TABLES IN SCHEMA s1,s2,SEQUENCE seq1,seq2, ALL SEQUENCES IN SCHEMA s3,s4;I don't like this syntax. It seems too much syntax for the same purpose in asingle command. If you look at GRANT command whose ALL TABLES IN SCHEMA syntaxwas extracted, you can use ON TABLE or ON ALL TABLES IN SCHEMA; you cannot useboth. This proposal allows duplicate objects (of course, you can ignore it butthe current code prevent duplicates -- see publication_add_relation).IMO you should mimic the GRANT grammar and have multiple commands for rowfiltering, column filtering, and ALL FOO IN SCHEMA. 
The filtering patches onlyuse the FOR TABLE syntax. The later won't have filtering syntax. Having saidthat the grammar should be:CREATE PUBLICATION name    [ FOR TABLE [ ONLY ] table_name [ * ] [ (column_name [, ...] ) ] [ WHERE (expression) ] [, ...]      | FOR ALL TABLES      | FOR ALL TABLES IN SCHEMA schema_name, [, ...]      | FOR ALL SEQUENCES IN SCHEMA schema_name, [, ...] ]    [ WITH ( publication_parameter [= value] [, ... ] ) ]ALTER PUBLICATION name ADD TABLE [ ONLY ] table_name [ * ] [ (column_name [, ...] ) ] [ WHERE (expression) ]ALTER PUBLICATION name ADD ALL TABLES IN SCHEMA schema_name, [, ...]ALTER PUBLICATION name ADD ALL SEQUENCES IN SCHEMA schema_name, [, ...]ALTER PUBLICATION name SET TABLE [ ONLY ] table_name [ * ] [ (column_name [, ...] ) ] [ WHERE (expression) ]ALTER PUBLICATION name SET ALL TABLES IN SCHEMA schema_name, [, ...]ALTER PUBLICATION name SET ALL SEQUENCES IN SCHEMA schema_name, [, ...]ALTER PUBLICATION name DROP TABLE [ ONLY ] table_name [ * ]ALTER PUBLICATION name DROP ALL TABLES IN SCHEMA schema_name, [, ...]ALTER PUBLICATION name DROP ALL SEQUENCES IN SCHEMA schema_name, [, ...]Opinions?--Euler TaveiraEDB   https://www.enterprisedb.com/", "msg_date": "Wed, 15 Sep 2021 11:46:34 -0300", "msg_from": "\"Euler Taveira\" <euler@eulerto.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wednesday, September 15, 2021 8:19 PM vignesh C <vignesh21@gmail.com> wrote:\r\n> I have extracted the parser code and attached it here, so that it will be easy to\r\n> go through. We wanted to support the following syntax as in\r\n> [1]:\r\n> CREATE PUBLICATION pub1 FOR\r\n> TABLE t1,t2,t3, ALL TABLES IN SCHEMA s1,s2, SEQUENCE seq1,seq2, ALL\r\n> SEQUENCES IN SCHEMA s3,s4;\r\n\r\nI am +1 for this syntax.\r\n\r\nThis syntax is more flexible than adding or dropping different type objects in\r\nseparate commands. 
User can either use one single command to add serval different\r\nobjects or use serval commands to add each type objects.\r\n\r\nBest regards,\r\nHou zj\r\n", "msg_date": "Thu, 16 Sep 2021 02:06:09 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Sep 7, 2021 at 3:51 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n...\n> I pushed the clerical part of this -- namely the addition of\n> PublicationTable node and PublicationRelInfo struct. I attach the part\n> of your v4 patch that I didn't include. It contains a couple of small\n> corrections, but I didn't do anything invasive (such as pgindent)\n> because that would perhaps cause you too much merge pain.\n\nI noticed that the latest v5 no longer includes the TAP test which was\nin the v4 patch.\n\n(src/test/subscription/t/021_column_filter.pl)\n\nWas that omission deliberate?\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia.\n\n\n", "msg_date": "Thu, 16 Sep 2021 13:06:58 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Sep 15, 2021 at 6:06 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-15, vignesh C wrote:\n> > The patch\n> > Generic_object_type_parser_002_table_schema_publication.patch has the\n> > changes that were used to handle the parsing. Schema and Relation both\n> > are different objects, schema is of string type and relation is of\n> > RangeVar type. 
While parsing, schema name is parsed in string format\n> > and relation is parsed and converted to rangevar type, these objects\n> > will be then handled accordingly during post processing.\n>\n> Yeah, I think it'd be cleaner if the node type has two members, something like\n> this\n>\n> typedef struct PublicationObjSpec\n> {\n> NodeTag type;\n> PublicationObjSpecType pubobjtype; /* type of this publication object */\n> RangeVar *rv; /* if a table */\n> String *objname; /* if a schema */\n> int location; /* token location, or -1 if unknown */\n> } PublicationObjSpec;\n>\n> and only one of them is set, the other is NULL, depending on the object type.\n>\n\nI think the problem here is that with the proposed grammar we won't be\nalways able to distinguish names at the gram.y stage. Some post\nparsing analysis is required to attribute the right type to name as is\ndone in the patch. The same seems to be indicated by Tom in his email\nas well where he has proposed this syntax [1]. Also, something similar\nis done for privilege_target (GRANT syntax) where we have a list of\nobjects but here the story is slightly more advanced because we are\nplanning to allow specifying multiple objects in one command. One\nmight think that we can identify each type of objects lists separately\nbut that gives grammar conflicts as it is not able to identify whether\nthe comma ',' is used for the same type object or for the next type.\nDue to which we need to come up with a generic object for names to\nwhich we attribute the right type in post parse analysis. 
Now, I think\ninstead of void *, it might be better to use Node * for generic\nobjects unless we have some problem.\n\n[1] - https://www.postgresql.org/message-id/877603.1629120678%40sss.pgh.pa.us\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 16 Sep 2021 08:45:15 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Sep 15, 2021 at 8:19 PM Euler Taveira <euler@eulerto.com> wrote:\n>\n> On Wed, Sep 15, 2021, at 9:19 AM, vignesh C wrote:\n>\n> I have extracted the parser code and attached it here, so that it will\n> be easy to go through. We wanted to support the following syntax as in\n> [1]:\n> CREATE PUBLICATION pub1 FOR\n> TABLE t1,t2,t3, ALL TABLES IN SCHEMA s1,s2,\n> SEQUENCE seq1,seq2, ALL SEQUENCES IN SCHEMA s3,s4;\n>\n> I don't like this syntax. It seems too much syntax for the same purpose in a\n> single command. If you look at GRANT command whose ALL TABLES IN SCHEMA syntax\n> was extracted, you can use ON TABLE or ON ALL TABLES IN SCHEMA; you cannot use\n> both. This proposal allows duplicate objects (of course, you can ignore it but\n> the current code prevent duplicates -- see publication_add_relation).\n>\n> IMO you should mimic the GRANT grammar and have multiple commands for row\n> filtering, column filtering, and ALL FOO IN SCHEMA. The filtering patches only\n> use the FOR TABLE syntax. The later won't have filtering syntax.\n>\n\nSure, but we don't prevent if the user uses only FOR TABLE variant.\nOTOH, it is better to provide flexibility to allow multiple objects in\none command unless that is not feasible. It saves the effort of users\nin many cases. 
In short, +1 for the syntax where multiple objects can\nbe allowed.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 16 Sep 2021 08:52:56 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Sep 16, 2021 at 8:45 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Wed, Sep 15, 2021 at 6:06 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> >\n> > On 2021-Sep-15, vignesh C wrote:\n> > > The patch\n> > > Generic_object_type_parser_002_table_schema_publication.patch has the\n> > > changes that were used to handle the parsing. Schema and Relation both\n> > > are different objects, schema is of string type and relation is of\n> > > RangeVar type. While parsing, schema name is parsed in string format\n> > > and relation is parsed and converted to rangevar type, these objects\n> > > will be then handled accordingly during post processing.\n> >\n> > Yeah, I think it'd be cleaner if the node type has two members, something like\n> > this\n> >\n> > typedef struct PublicationObjSpec\n> > {\n> > NodeTag type;\n> > PublicationObjSpecType pubobjtype; /* type of this publication object */\n> > RangeVar *rv; /* if a table */\n> > String *objname; /* if a schema */\n> > int location; /* token location, or -1 if unknown */\n> > } PublicationObjSpec;\n> >\n> > and only one of them is set, the other is NULL, depending on the object type.\n> >\n>\n> I think the problem here is that with the proposed grammar we won't be\n> always able to distinguish names at the gram.y stage.\n\nThis is the issue that Amit was talking about:\ngram.y: error: shift/reduce conflicts: 2 found, 0 expected\ngram.y: warning: shift/reduce conflict on token ',' [-Wcounterexamples]\n First example: CREATE PUBLICATION name FOR TABLE relation_expr_list\n• ',' relation_expr ',' PublicationObjSpec opt_definition $end\n Shift derivation\n $accept\n ↳ parse_toplevel\n $end\n ↳ 
stmtmulti\n ↳ toplevel_stmt\n ↳ stmt\n ↳ CreatePublicationStmt\n ↳ CREATE PUBLICATION name FOR pub_obj_list\n opt_definition\n ↳ PublicationObjSpec\n ',' PublicationObjSpec\n ↳ TABLE relation_expr_list\n ↳\nrelation_expr_list • ',' relation_expr\n Second example: CREATE PUBLICATION name FOR TABLE relation_expr_list\n• ',' PublicationObjSpec opt_definition $end\n Reduce derivation\n $accept\n ↳ parse_toplevel\n $end\n ↳ stmtmulti\n ↳ toplevel_stmt\n ↳ stmt\n ↳ CreatePublicationStmt\n ↳ CREATE PUBLICATION name FOR pub_obj_list\n opt_definition\n ↳ pub_obj_list\n ',' PublicationObjSpec\n ↳ PublicationObjSpec\n ↳ TABLE relation_expr_list •\nHere it is not able to distinguish if ',' is used for the next table\nname or the next object.\nI was able to reproduce this issue with the attached patch.\n\nRegards,\nVignesh", "msg_date": "Thu, 16 Sep 2021 10:35:21 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-16, Amit Kapila wrote:\n\n> I think the problem here is that with the proposed grammar we won't be\n> always able to distinguish names at the gram.y stage. Some post\n> parsing analysis is required to attribute the right type to name as is\n> done in the patch.\n\nDoesn't it work to stuff them all into RangeVars? 
Then you don't need\nto make the node type a monstrosity, just bail out in parse analysis if\nan object spec has more elements in the RV than the object type allows.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Thu, 16 Sep 2021 09:44:35 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Sep 16, 2021 at 6:14 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-16, Amit Kapila wrote:\n>\n> > I think the problem here is that with the proposed grammar we won't be\n> > always able to distinguish names at the gram.y stage. Some post\n> > parsing analysis is required to attribute the right type to name as is\n> > done in the patch.\n>\n> Doesn't it work to stuff them all into RangeVars? Then you don't need\n> to make the node type a monstrosity, just bail out in parse analysis if\n> an object spec has more elements in the RV than the object type allows.\n>\n\nSo, are you suggesting that we store even schema names corresponding\nto FOR ALL TABLES IN SCHEMA s1 [, ...] grammar in RangeVars in some\nway (say store schema name in relname or schemaname field of RangeVar)\nat gram.y stage and then later extract it from RangeVar? If so, why do\nyou think it would be better than the current proposed way?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 16 Sep 2021 19:18:29 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-16, vignesh C wrote:\n\n> diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y\n> index e3068a374e..c50bb570ea 100644\n> --- a/src/backend/parser/gram.y\n> +++ b/src/backend/parser/gram.y\n\nYeah, on a quick glance this looks all wrong. 
Your PublicationObjSpec\nproduction should return a node with tag PublicationObjSpec, and\npubobj_expr should not exist at all -- that stuff is just making it all\nmore confusing.\n\nI think it'd be something like this:\n\nPublicationObjSpec:\t\n\t\t\tALL TABLES\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_ALL_TABLES;\n\t\t\t\t\t\t$$->location = @1;\n\t\t\t\t\t}\n\t\t\t| TABLE qualified_name\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_TABLE;\n\t\t\t\t\t\t$$->pubobj = $2;\n\t\t\t\t\t\t$$->location = @1;\n\t\t\t\t\t}\n\t\t\t| ALL TABLES IN_P SCHEMA name\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_ALL_TABLES_IN_SCHEMA;\n\t\t\t\t\t\t$$->pubobj = makeRangeVar( ... $5 ... );\n\t\t\t\t\t\t$$->location = @1;\n\t\t\t\t\t}\n\t\t\t| qualified_name\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n\t\t\t\t\t\t$$->pubobj = $1;\n\t\t\t\t\t\t$$->location = @1;\n\t\t\t\t\t};\n\nYou need a single object name under TABLE, not a list -- this was Tom's\npoint about needing post-processing to determine how to assign a type to\na object that's what I named PUBLICATIONOBJ_CONTINUATION here.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Puedes vivir sólo una vez, pero si lo haces bien, una vez es suficiente\"\n\n\n", "msg_date": "Thu, 16 Sep 2021 10:50:25 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-16, Alvaro Herrera wrote:\n\nActually, something like this might be better:\n\n> PublicationObjSpec:\t\n\n> \t\t\t| TABLE qualified_name\n> \t\t\t\t\t{\n> \t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n> \t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_TABLE;\n> 
\t\t\t\t\t\t$$->pubrvobj = $2;\n> \t\t\t\t\t\t$$->location = @1;\n> \t\t\t\t\t}\n> \t\t\t| ALL TABLES IN_P SCHEMA name\n> \t\t\t\t\t{\n> \t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n> \t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_ALL_TABLES_IN_SCHEMA;\n> \t\t\t\t\t\t$$->pubplainobj = $5;\n> \t\t\t\t\t\t$$->location = @1;\n> \t\t\t\t\t}\n\nSo you don't have to cram the schema name in a RangeVar, which would\nindeed be quite awkward. (I'm sure you can come up with better names\nfor the struct members there ...)\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Porque francamente, si para saber manejarse a uno mismo hubiera que\nrendir examen... ¿Quién es el machito que tendría carnet?\" (Mafalda)\n\n\n", "msg_date": "Thu, 16 Sep 2021 11:36:32 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Sep 16, 2021 at 7:20 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-16, vignesh C wrote:\n>\n> > diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y\n> > index e3068a374e..c50bb570ea 100644\n> > --- a/src/backend/parser/gram.y\n> > +++ b/src/backend/parser/gram.y\n>\n> Yeah, on a quick glance this looks all wrong. 
Your PublicationObjSpec\n> production should return a node with tag PublicationObjSpec, and\n> pubobj_expr should not exist at all -- that stuff is just making it all\n> more confusing.\n>\n> I think it'd be something like this:\n>\n> PublicationObjSpec:\n> ALL TABLES\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->pubobjtype = PUBLICATIONOBJ_ALL_TABLES;\n> $$->location = @1;\n> }\n> | TABLE qualified_name\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->pubobjtype = PUBLICATIONOBJ_TABLE;\n> $$->pubobj = $2;\n> $$->location = @1;\n> }\n> | ALL TABLES IN_P SCHEMA name\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->pubobjtype = PUBLICATIONOBJ_ALL_TABLES_IN_SCHEMA;\n> $$->pubobj = makeRangeVar( ... $5 ... );\n> $$->location = @1;\n> }\n> | qualified_name\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n> $$->pubobj = $1;\n> $$->location = @1;\n> };\n>\n> You need a single object name under TABLE, not a list -- this was Tom's\n> point about needing post-processing to determine how to assign a type to\n> a object that's what I named PUBLICATIONOBJ_CONTINUATION here.\n\nIn the above, we will not be able to use qualified_name, as\nqualified_name will not support the following syntaxes:\ncreate publication pub1 for table t1 *;\ncreate publication pub1 for table ONLY t1 *;\ncreate publication pub1 for table ONLY (t1);\n\nTo solve this problem we can change qualified_name to relation_expr\nbut the problem with doing that is that the user will be able to\nprovide the following syntaxes:\ncreate publication pub1 for all tables in schema sch1 *;\ncreate publication pub1 for all tables in schema ONLY sch1 *;\ncreate publication pub1 for all tables in schema ONLY (sch1);\n\nTo handle this we will need some special flag which will differentiate\nthese and throw errors at post processing time. 
We need to define an\nexpression similar to relation_expr say pub_expr which handles all\nvariants of qualified_name and then use a special flag so that we can\nthrow an error if somebody uses the above type of syntax for schema\nnames. And then if we have to distinguish between schema name and\nrelation name variant, then we need few other things.\n\nWe proposed the below solution which handles all these problems and\nalso used Node type which need not store schemaname in RangeVar type:\npubobj_expr:\n pubobj_name\n {\n /* inheritance query, implicitly */\n $$ = makeNode(PublicationObjSpec);\n $$->object = $1;\n }\n | extended_relation_expr\n {\n $$ = makeNode(PublicationObjSpec);\n $$->object = (Node *)$1;\n }\n | CURRENT_SCHEMA\n {\n $$ = makeNode(PublicationObjSpec);\n $$->object = (Node\n*)makeString(\"CURRENT_SCHEMA\");\n }\n ;\n/* This can be either a schema or relation name. */\npubobj_name:\n ColId\n {\n $$ = (Node *) makeString($1);\n }\n | ColId indirection\n {\n $$ = (Node *)\nmakeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n }\n ;\n/* FOR TABLE and FOR ALL TABLES IN SCHEMA specifications */\nPublicationObjSpec: TABLE pubobj_expr\n {\n $$ = $2;\n $$->pubobjtype =\nPUBLICATIONOBJ_TABLE;\n $$->location = @1;\n }\n | ALL TABLES IN_P SCHEMA pubobj_expr\n {\n $$ = $5;\n $$->pubobjtype =\nPUBLICATIONOBJ_REL_IN_SCHEMA;\n $$->location = @1;\n }\n | pubobj_expr\n {\n $$ = $1;\n $$->pubobjtype =\nPUBLICATIONOBJ_UNKNOWN;\n $$->location = @1;\n }\n ;\n\nThe same has been proposed in the recent version of patch [1].\n[1] - https://www.postgresql.org/message-id/CALDaNm0OudeDeFN7bSWPro0hgKx%3D1zPgcNFWnvU_G6w3mDPX0Q%40mail.gmail.com\nThoughts?\n\nRegards,\nVignesh\n\n\n", "msg_date": "Fri, 17 Sep 2021 09:36:41 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thurs, Sep 16, 2021 10:37 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\r\n> On 
2021-Sep-16, Alvaro Herrera wrote:\r\n> \r\n> Actually, something like this might be better:\r\n> \r\n> > PublicationObjSpec:\r\n> \r\n> >\t\t\t| TABLE qualified_name\r\n> >\t\t\t\t\t{\r\n> >\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n> >\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_TABLE;\r\n> >\t\t\t\t\t\t$$->pubrvobj = $2;\r\n> >\t\t\t\t\t\t$$->location = @1;\r\n> >\t\t\t\t\t}\r\n> >\t\t\t| ALL TABLES IN_P SCHEMA name\r\n> >\t\t\t\t\t{\r\n> >\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n> >\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_ALL_TABLES_IN_SCHEMA;\r\n> >\t\t\t\t\t\t$$->pubplainobj = $5;\r\n> >\t\t\t\t\t\t$$->location = @1;\r\n> >\t\t\t\t\t}\r\n> So you don't have to cram the schema name in a RangeVar, which would indeed\r\n> be quite awkward. (I'm sure you can come up with better names for the struct\r\n> members there ...)> \r\n\r\nDid you mean something like the following ?\r\n-----\r\nPublicationObjSpec:\r\n\t\tTABLE qualified_name {...}\r\n\t\t| ALL TABLES IN_P SCHEMA name {...}\r\n\t\t;\r\n\r\npub_obj_list:\r\n\t\tPublicationObjSpec\r\n | pub_obj_list ',' PublicationObjSpec\r\n-----\r\n\r\nIf so, I think it only supports syntaxes like \"TABLE a, TABLE b, TABLE c\" while\r\nwe cannnot use \"TABLE a,b,c\". 
To support multiple objects, we need a bare name\r\nin PublicationObjSpec.\r\n\r\nOr Did you mean something like this ?\r\n-----\r\nPublicationObjSpec:\r\n\t\tTABLE qualified_name {...}\r\n\t\t| ALL TABLES IN_P SCHEMA name {...}\r\n\t\t| qualified_name {...}\r\n\t\t;\r\n-----\r\n\r\nI think this doesn't support relation expression like \"table */ONLY table/ONLY\r\n(table)\" as memtioned by Vignesh [1].\r\n\r\nThoughts ?\r\n\r\n[1] https://www.postgresql.org/message-id/CALDaNm06%3DLDytYyY%2BxcAQd8UK_YpJ3zMo4P5V8KBArw6MoDWDg%40mail.gmail.com\r\n\r\nBest regards,\r\nHou zj\r\n", "msg_date": "Fri, 17 Sep 2021 05:08:36 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Sep 17, 2021 at 9:36 AM vignesh C <vignesh21@gmail.com> wrote:\n>\n> On Thu, Sep 16, 2021 at 7:20 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> >\n> > On 2021-Sep-16, vignesh C wrote:\n> >\n> > > diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y\n> > > index e3068a374e..c50bb570ea 100644\n> > > --- a/src/backend/parser/gram.y\n> > > +++ b/src/backend/parser/gram.y\n> >\n> > Yeah, on a quick glance this looks all wrong. 
Your PublicationObjSpec\n> > production should return a node with tag PublicationObjSpec, and\n> > pubobj_expr should not exist at all -- that stuff is just making it all\n> > more confusing.\n> >\n> > I think it'd be something like this:\n> >\n> > PublicationObjSpec:\n> > ALL TABLES\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->pubobjtype = PUBLICATIONOBJ_ALL_TABLES;\n> > $$->location = @1;\n> > }\n> > | TABLE qualified_name\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->pubobjtype = PUBLICATIONOBJ_TABLE;\n> > $$->pubobj = $2;\n> > $$->location = @1;\n> > }\n> > | ALL TABLES IN_P SCHEMA name\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->pubobjtype = PUBLICATIONOBJ_ALL_TABLES_IN_SCHEMA;\n> > $$->pubobj = makeRangeVar( ... $5 ... );\n> > $$->location = @1;\n> > }\n> > | qualified_name\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n> > $$->pubobj = $1;\n> > $$->location = @1;\n> > };\n> >\n> > You need a single object name under TABLE, not a list -- this was Tom's\n> > point about needing post-processing to determine how to assign a type to\n> > a object that's what I named PUBLICATIONOBJ_CONTINUATION here.\n>\n> In the above, we will not be able to use qualified_name, as\n> qualified_name will not support the following syntaxes:\n> create publication pub1 for table t1 *;\n> create publication pub1 for table ONLY t1 *;\n> create publication pub1 for table ONLY (t1);\n>\n> To solve this problem we can change qualified_name to relation_expr\n> but the problem with doing that is that the user will be able to\n> provide the following syntaxes:\n> create publication pub1 for all tables in schema sch1 *;\n> create publication pub1 for all tables in schema ONLY sch1 *;\n> create publication pub1 for all tables in schema ONLY (sch1);\n>\n> To handle this we will need some special flag which will differentiate\n> these and throw errors at post processing time. 
We need to define an\n> expression similar to relation_expr say pub_expr which handles all\n> variants of qualified_name and then use a special flag so that we can\n> throw an error if somebody uses the above type of syntax for schema\n> names. And then if we have to distinguish between schema name and\n> relation name variant, then we need few other things.\n>\n> We proposed the below solution which handles all these problems and\n> also used Node type which need not store schemaname in RangeVar type:\n>\n\nAlvaro, do you have any thoughts on these proposed grammar changes?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 23 Sep 2021 15:48:57 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\n\nI wanted to do a review of this patch, but I'm a bit confused about \nwhich patch(es) to review. There's the v5 patch, and then these two \npatches - which seem to be somewhat duplicate, though.\n\nCan anyone explain what's the \"current\" patch version, or perhaps tell \nme which of the patches to combine?\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Thu, 23 Sep 2021 21:15:12 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Sep 24, 2021 at 12:45 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> Hi,\n>\n> I wanted to do a review of this patch, but I'm a bit confused about\n> which patch(es) to review. There's the v5 patch, and then these two\n> patches - which seem to be somewhat duplicate, though.\n>\n> Can anyone explain what's the \"current\" patch version, or perhaps tell\n> me which of the patches to combine?\n>\n\nI think v5 won't work atop a common grammar patch. There need some\nadjustments in v5. 
I think it would be good if we can first get the\ncommon grammar patch reviewed/committed and then build this on top of\nit. The common grammar and the corresponding implementation are being\naccomplished in the Schema support patch, the latest version of which\nis at [1]. Now, Vignesh seems to have extracted just the grammar\nportion of that work in his patch\nGeneric_object_type_parser_002_table_schema_publication [2] (there are\nsome changes after that but not anything fundamentally different till\nnow) then he seems to have prepared a patch\n(Generic_object_type_parser_001_table_publication [2]) on similar\nlines only for tables.\n\n[1] - https://www.postgresql.org/message-id/OS3PR01MB571844A87B6A83B7C10F9D6B94A39%40OS3PR01MB5718.jpnprd01.prod.outlook.com\n[2] - https://www.postgresql.org/message-id/CALDaNm1YoxJCs%3DuiyPM%3DtFDDc2qn0ja01nb2TCPqrjZH2jR0sQ%40mail.gmail.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 24 Sep 2021 08:40:30 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Sep 24, 2021 at 8:40 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Fri, Sep 24, 2021 at 12:45 AM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n> >\n> > Hi,\n> >\n> > I wanted to do a review of this patch, but I'm a bit confused about\n> > which patch(es) to review. There's the v5 patch, and then these two\n> > patches - which seem to be somewhat duplicate, though.\n> >\n> > Can anyone explain what's the \"current\" patch version, or perhaps tell\n> > me which of the patches to combine?\n> >\n>\n> I think v5 won't work atop a common grammar patch. There need some\n> adjustments in v5. I think it would be good if we can first get the\n> common grammar patch reviewed/committed and then build this on top of\n> it. 
The common grammar and the corresponding implementation are being\n> accomplished in the Schema support patch, the latest version of which\n> is at [1].\n\nI have posted an updated patch with the fixes at [1], please review\nthe updated patch.\n[1] - https://www.postgresql.org/message-id/CALDaNm1R-xbQvz4LU5OXu3KKwbWOz3uDcT_YjRU6V0R5FZDYDg%40mail.gmail.com\n\nRegards,\nVignesh\n\n\n", "msg_date": "Fri, 24 Sep 2021 10:35:29 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-23, Amit Kapila wrote:\n\n> Alvaro, do you have any thoughts on these proposed grammar changes?\n\nYeah, I think pubobj_name remains a problem in that you don't know its\nreturn type -- could be a String or a RangeVar, and the user of that\nproduction can't distinguish. So you're still (unnecessarily, IMV)\nstashing an object of undetermined type into ->object.\n\nI think you should get rid of both pubobj_name and pubobj_expr and do\nsomethine like this:\n\n/* FOR TABLE and FOR ALL TABLES IN SCHEMA specifications */\nPublicationObjSpec:\tTABLE ColId\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_TABLE;\n\t\t\t\t\t\t$$->rangevar = makeRangeVarFromQualifiedName($1, NULL, @1, yyscanner);\n\t\t\t\t\t}\n\t\t\t| TABLE ColId indirection\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_TABLE;\n\t\t\t\t\t\t$$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n\t\t\t\t\t}\n\t\t\t| ALL TABLES IN_P SCHEMA ColId\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_REL_IN_SCHEMA;\n\t\t\t\t\t\t$$->name = $4;\n\t\t\t\t\t}\n\t\t\t| ALL TABLES IN_P SCHEMA CURRENT_SCHEMA\t/* XXX should this be \"IN_P CURRENT_SCHEMA\"? 
*/\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_CURRSCHEMA;\n\t\t\t\t\t\t$$->name = $4;\n\t\t\t\t\t}\n\t\t\t| ColId\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->name = $1;\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n\t\t\t\t\t}\n\t\t\t| ColId indirection\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n\t\t\t\t\t}\n\t\t\t| CURRENT_SCHEMA\n\t\t\t\t\t{\n\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\n\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_CURRSCHEMA;\n\t\t\t\t\t}\n\t\t;\n\nso in AlterPublicationStmt you would have stanzas like\n\n\t\t\t| ALTER PUBLICATION name ADD_P pub_obj_list\n\t\t\t\t{\n\t\t\t\t\tAlterPublicationStmt *n = makeNode(AlterPublicationStmt);\n\t\t\t\t\tn->pubname = $3;\n\t\t\t\t\tn->pubobjects = preprocess_pubobj_list($5);\n\t\t\t\t\tn->action = DEFELEM_ADD;\n\t\t\t\t\t$$ = (Node *)n;\n\t\t\t\t}\n\nwhere preprocess_pubobj_list (defined right after processCASbits and\nsomewhat mimicking it and SplitColQualList) takes all\nPUBLICATIONOBJ_CONTINUATION and turns them into either\nPUBLICATIONOBJ_TABLE entries or PUBLICATIONOBJ_REL_IN_SCHEMA entries,\ndepending on what the previous entry was. (And of course if there is no\nprevious entry, raise an error immediately). Note that node\nPublicationObjSpec now has two fields, one for RangeVar and another for\na plain name, and tables always use the second one, except when they are\ncontinuations, but of course those continuations that use name are\nturned into rangevars in the preprocess step. 
I think that would make\nthe code in ObjectsInPublicationToOids less messy.\n\n(I don't think using the string \"CURRENT_SCHEMA\" is a great solution.\nDid you try having a schema named CURRENT_SCHEMA?)\n\nI verified that bison is happy with the grammar I proposed; I also\nverified that you can add opt_column_list to the stanzas for tables, and\nit remains happy.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\nY una voz del caos me habló y me dijo\n\"Sonríe y sé feliz, podría ser peor\".\nY sonreí. Y fui feliz.\nY fue peor.\n\n\n", "msg_date": "Fri, 24 Sep 2021 10:25:09 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 9/24/21 7:05 AM, vignesh C wrote:\n> On Fri, Sep 24, 2021 at 8:40 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>\n>> On Fri, Sep 24, 2021 at 12:45 AM Tomas Vondra\n>> <tomas.vondra@enterprisedb.com> wrote:\n>>>\n>>> Hi,\n>>>\n>>> I wanted to do a review of this patch, but I'm a bit confused about\n>>> which patch(es) to review. There's the v5 patch, and then these two\n>>> patches - which seem to be somewhat duplicate, though.\n>>>\n>>> Can anyone explain what's the \"current\" patch version, or perhaps tell\n>>> me which of the patches to combine?\n>>>\n>>\n>> I think v5 won't work atop a common grammar patch. There need some\n>> adjustments in v5. I think it would be good if we can first get the\n>> common grammar patch reviewed/committed and then build this on top of\n>> it. The common grammar and the corresponding implementation are being\n>> accomplished in the Schema support patch, the latest version of which\n>> is at [1].\n> \n> I have posted an updated patch with the fixes at [1], please review\n> the updated patch.\n> [1] - https://www.postgresql.org/message-id/CALDaNm1R-xbQvz4LU5OXu3KKwbWOz3uDcT_YjRU6V0R5FZDYDg%40mail.gmail.com\n> \n\nBut that's not the column filtering patch, right? 
Why would this patch \ndepend on \"schema level support\", but maybe the consensus is there's \nsome common part that we need to get in first?\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Fri, 24 Sep 2021 22:54:40 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-24, Tomas Vondra wrote:\n\n> But that's not the column filtering patch, right? Why would this patch\n> depend on \"schema level support\", but maybe the consensus is there's some\n> common part that we need to get in first?\n\nYes, the grammar needs to be common. I posted a proposed grammar in\nhttps://www.postgresql.org/message-id/202109241325.eag5g6mpvoup%40alvherre.pgsql\n(this thread) which should serve both. I forgot to test the addition of\na WHERE clause for row filtering, though, and I didn't think to look at\nadding SEQUENCE support either.\n\n(I'm not sure what's going to be the proposal regarding FOR ALL TABLES\nIN SCHEMA for sequences. Are we going to have \"FOR ALL SEQUENCES IN\nSCHEMA\" and \"FOR ALL TABLES AND SEQUENCES IN SCHEMA\"?)\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\nThou shalt study thy libraries and strive not to reinvent them without\ncause, that thy code may be short and readable and thy days pleasant\nand productive. (7th Commandment for C Programmers)\n\n\n", "msg_date": "Fri, 24 Sep 2021 19:24:19 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 9/25/21 12:24 AM, Alvaro Herrera wrote:\n> On 2021-Sep-24, Tomas Vondra wrote:\n> \n>> But that's not the column filtering patch, right? 
Why would this patch\n>> depend on \"schema level support\", but maybe the consensus is there's some\n>> common part that we need to get in first?\n> \n> Yes, the grammar needs to be common. I posted a proposed grammar in\n> https://www.postgresql.org/message-id/202109241325.eag5g6mpvoup%40alvherre.pgsql\n> (this thread) which should serve both. I forgot to test the addition of\n> a WHERE clause for row filtering, though, and I didn't think to look at\n> adding SEQUENCE support either.\n> \n\nFine with me, but I still don't know which version of the column \nfiltering patch should I look at ... maybe there's none up to date, at \nthe moment?\n\n> (I'm not sure what's going to be the proposal regarding FOR ALL TABLES\n> IN SCHEMA for sequences. Are we going to have \"FOR ALL SEQUENCES IN\n> SCHEMA\" and \"FOR ALL TABLES AND SEQUENCES IN SCHEMA\"?)\n> \n\nShould be \"FOR ABSOLUTELY EVERYTHING IN SCHEMA\" of course ;-)\n\nOn a more serious note, a comma-separated list of objects seems like the \nbest / most flexible choice, i.e. \"FOR TABLES, SEQUENCES IN SCHEMA\"?\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 25 Sep 2021 00:30:12 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-25, Tomas Vondra wrote:\n\n> On 9/25/21 12:24 AM, Alvaro Herrera wrote:\n> > On 2021-Sep-24, Tomas Vondra wrote:\n> > \n> > > But that's not the column filtering patch, right? Why would this patch\n> > > depend on \"schema level support\", but maybe the consensus is there's some\n> > > common part that we need to get in first?\n> > \n> > Yes, the grammar needs to be common. I posted a proposed grammar in\n> > https://www.postgresql.org/message-id/202109241325.eag5g6mpvoup%40alvherre.pgsql\n> > (this thread) which should serve both. 
I forgot to test the addition of\n> > a WHERE clause for row filtering, though, and I didn't think to look at\n> > adding SEQUENCE support either.\n> \n> Fine with me, but I still don't know which version of the column filtering\n> patch should I look at ... maybe there's none up to date, at the moment?\n\nI don't think there is one. I think the latest is what I posted in\nhttps://postgr.es/m/202109061751.3qz5xpugwx6w@alvherre.pgsql (At least I\ndon't see any reply from Rahila with attachments after that), but that\nwasn't addressing a bunch of review comments that had been made; and I\nsuspect that Amit K has already committed a few conflicting patches\nafter that.\n\n> > (I'm not sure what's going to be the proposal regarding FOR ALL TABLES\n> > IN SCHEMA for sequences. Are we going to have \"FOR ALL SEQUENCES IN\n> > SCHEMA\" and \"FOR ALL TABLES AND SEQUENCES IN SCHEMA\"?)\n> \n> Should be \"FOR ABSOLUTELY EVERYTHING IN SCHEMA\" of course ;-)\n\nhahah ...\n\n> On a more serious note, a comma-separated list of objects seems like the\n> best / most flexible choice, i.e. \"FOR TABLES, SEQUENCES IN SCHEMA\"?\n\nHmm, not sure if bison is going to like that. Maybe it's OK if\nSEQUENCES is a fully reserved word? 
But nothing beats experimentation!\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\nThou shalt check the array bounds of all strings (indeed, all arrays), for\nsurely where thou typest \"foo\" someone someday shall type\n\"supercalifragilisticexpialidocious\" (5th Commandment for C programmers)\n\n\n", "msg_date": "Fri, 24 Sep 2021 20:00:47 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "From Fri, Sep 24, 2021 9:25 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\r\n> On 2021-Sep-23, Amit Kapila wrote:\r\n> \r\n> > Alvaro, do you have any thoughts on these proposed grammar changes?\r\n> \r\n> Yeah, I think pubobj_name remains a problem in that you don't know its return\r\n> type -- could be a String or a RangeVar, and the user of that production can't\r\n> distinguish. So you're still (unnecessarily, IMV) stashing an object of\r\n> undetermined type into ->object.\r\n> \r\n> I think you should get rid of both pubobj_name and pubobj_expr and do\r\n> somethine like this:\r\n> PublicationObjSpec:\tTABLE ColId\r\n> \t\t\t\t\t{\r\n> \t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n> \t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_TABLE;\r\n> \t\t\t\t\t\t$$->rangevar = makeRangeVarFromQualifiedName($1, NULL, @1, yyscanner);\r\n> \t\t\t\t\t}\r\n> \t\t\t| TABLE ColId indirection\r\n> \t\t\t\t\t{\r\n> \t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n> \t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_TABLE;\r\n> \t\t\t\t\t\t$$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\r\n> \t\t\t\t\t}\r\n\r\nHi,\r\n\r\nIIRC, the above grammar doesn't support extended relation expression (like:\r\n\"tablename * \", \"ONLY tablename\", \"ONLY '( tablename )\") which is part of rule\r\nrelation_expr. I think we should add these too. 
And if we move forward with the\r\ndesign you proposed, we should do something like the following:\r\n\r\n/* FOR TABLE and FOR ALL TABLES IN SCHEMA specifications */\r\nPublicationObjSpec:\r\n\t\t\tTABLE relation_expr\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n\t\t\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_TABLE;\r\n\t\t\t\t\t\t\t\t$$->rangevar = $2;\r\n\t\t\t\t\t\t\t}\r\n\t\t\t| ALL TABLES IN_P SCHEMA ColId\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n\t\t\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_REL_IN_SCHEMA;\r\n\t\t\t\t\t\t\t\t$$->name = $5;\r\n\t\t\t\t\t\t\t}\r\n\t\t\t| ALL TABLES IN_P SCHEMA CURRENT_SCHEMA\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n\t\t\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_CURRSCHEMA;\r\n\t\t\t\t\t\t\t\t$$->name = $5;\r\n\t\t\t\t\t\t\t}\r\n\t\t\t| extended_relation_expr\t/* grammar like tablename * , ONLY tablename, ONLY ( tablename )*/\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n\t\t\t\t\t\t\t\t$$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\r\n\t\t\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\r\n\t\t\t\t\t\t\t}\r\n\t\t\t| ColId\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n\t\t\t\t\t\t\t\t$$->name = $1;\r\n\t\t\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\r\n\r\n\t\t\t\t\t\t\t}\r\n\t\t\t| ColId indirection\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t$$ = makeNode(PublicationObjSpec);\r\n\t\t\t\t\t\t\t\t$$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\r\n\t\t\t\t\t\t\t\t$$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\r\n\r\n\t\t\t\t\t\t\t}\r\n\r\nBest regards,\r\nHou zj\r\n", "msg_date": "Sat, 25 Sep 2021 01:54:12 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Sep 24, 2021 at 6:55 
PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-23, Amit Kapila wrote:\n>\n> > Alvaro, do you have any thoughts on these proposed grammar changes?\n>\n> Yeah, I think pubobj_name remains a problem in that you don't know its\n> return type -- could be a String or a RangeVar, and the user of that\n> production can't distinguish. So you're still (unnecessarily, IMV)\n> stashing an object of undetermined type into ->object.\n>\n> I think you should get rid of both pubobj_name and pubobj_expr and do\n> somethine like this:\n>\n> /* FOR TABLE and FOR ALL TABLES IN SCHEMA specifications */\n> PublicationObjSpec: TABLE ColId\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->pubobjtype = PUBLICATIONOBJ_TABLE;\n> $$->rangevar = makeRangeVarFromQualifiedName($1, NULL, @1, yyscanner);\n> }\n> | TABLE ColId indirection\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->pubobjtype = PUBLICATIONOBJ_TABLE;\n> $$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n> }\n> | ALL TABLES IN_P SCHEMA ColId\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->pubobjtype = PUBLICATIONOBJ_REL_IN_SCHEMA;\n> $$->name = $4;\n> }\n> | ALL TABLES IN_P SCHEMA CURRENT_SCHEMA /* XXX should this be \"IN_P CURRENT_SCHEMA\"? 
*/\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->pubobjtype = PUBLICATIONOBJ_CURRSCHEMA;\n> $$->name = $4;\n> }\n> | ColId\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->name = $1;\n> $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n> }\n> | ColId indirection\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n> $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n> }\n> | CURRENT_SCHEMA\n> {\n> $$ = makeNode(PublicationObjSpec);\n> $$->pubobjtype = PUBLICATIONOBJ_CURRSCHEMA;\n> }\n> ;\n\nApart from the issue that Hou San pointed, I found one issue with\nintroduction of PUBLICATIONOBJ_CURRSCHEMA, I was not able to\ndifferentiate if it is table or schema in the following cases:\nCREATE PUBLICATION pub1 FOR ALL TABLES IN SCHEMA CURRENT_SCHEMA;\nCREATE PUBLICATION pub1 FOR ALL TABLES IN SCHEMA sch1, CURRENT_SCHEMA;\nCREATE PUBLICATION pub1 FOR table t1, CURRENT_SCHEMA;\nThe differentiation is required to differentiate and add a schema or a table.\n\nI felt it was better to use PUBLICATIONOBJ_CONTINUATION in case of\nCURRENT_SCHEMA in multiple object cases like:\nPublicationObjSpec: TABLE relation_expr\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->pubobjtype =\nPUBLICATIONOBJ_TABLE;\n $$->rangevar = $2;\n }\n | ALL TABLES IN_P SCHEMA ColId\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->pubobjtype =\nPUBLICATIONOBJ_REL_IN_SCHEMA;\n $$->name = $5;\n $$->location = @5;\n }\n | ALL TABLES IN_P SCHEMA CURRENT_SCHEMA /* XXX\nshould this be \"IN_P CURRENT_SCHEMA\"? 
*/\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->pubobjtype =\nPUBLICATIONOBJ_REL_IN_SCHEMA;\n $$->name = \"CURRENT_SCHEMA\";\n $$->location = @5;\n }\n | ColId\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->name = $1;\n $$->pubobjtype =\nPUBLICATIONOBJ_CONTINUATION;\n $$->location = @1;\n }\n | ColId indirection\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->rangevar =\nmakeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n $$->pubobjtype =\nPUBLICATIONOBJ_CONTINUATION;\n $$->location = @1;\n }\n | CURRENT_SCHEMA\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->pubobjtype =\nPUBLICATIONOBJ_CONTINUATION;\n $$->name = \"CURRENT_SCHEMA\";\n $$->location = @1;\n }\n | extended_relation_expr /* grammar\nlike tablename * , ONLY tablename, ONLY ( tablename )*/\n {\n $$ =\nmakeNode(PublicationObjSpec);\n /*$$->rangevar =\nmakeRangeVarFromQualifiedName($1, $2, @1, yyscanner); */\n $$->rangevar = $1;\n $$->pubobjtype =\nPUBLICATIONOBJ_CONTINUATION;\n }\n ;\n\nI'm ok with your suggestion along with the above proposed changes. I\nfelt the changes proposed at [1] were also fine. Let's change it to\nwhichever is better, easily extendable and can handle the Column\nfiltering project, ALL TABLES IN SCHEMA, ALL SEQUENCES IN SCHEMA\nprojects, and other projects in the future. Based on that we can check\nin the parser changes independently and then the remaining series of\nthe patches can be rebased on top of it accordingly. 
Thoughts?\n\n> so in AlterPublicationStmt you would have stanzas like\n>\n> | ALTER PUBLICATION name ADD_P pub_obj_list\n> {\n> AlterPublicationStmt *n = makeNode(AlterPublicationStmt);\n> n->pubname = $3;\n> n->pubobjects = preprocess_pubobj_list($5);\n> n->action = DEFELEM_ADD;\n> $$ = (Node *)n;\n> }\n>\n> where preprocess_pubobj_list (defined right after processCASbits and\n> somewhat mimicking it and SplitColQualList) takes all\n> PUBLICATIONOBJ_CONTINUATION and turns them into either\n> PUBLICATIONOBJ_TABLE entries or PUBLICATIONOBJ_REL_IN_SCHEMA entries,\n> depending on what the previous entry was. (And of course if there is no\n> previous entry, raise an error immediately). Note that node\n> PublicationObjSpec now has two fields, one for RangeVar and another for\n> a plain name, and tables always use the second one, except when they are\n> continuations, but of course those continuations that use name are\n> turned into rangevars in the preprocess step. I think that would make\n> the code in ObjectsInPublicationToOids less messy.\n\nI agree with this. 
I will make the changes for this in the next version.\n\n> (I don't think using the string \"CURRENT_SCHEMA\" is a great solution.\n> Did you try having a schema named CURRENT_SCHEMA?)\n\nHere CURRENT_SCHEMA is not used for the schema name, it will be\nreplaced with the name of the schema that is first in the search path.\n\n[1] - https://www.postgresql.org/message-id/CALDaNm1R-xbQvz4LU5OXu3KKwbWOz3uDcT_YjRU6V0R5FZDYDg%40mail.gmail.com\n\nRegards,\nVignesh\n\n\n", "msg_date": "Sat, 25 Sep 2021 13:15:08 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sat, Sep 25, 2021 at 1:15 PM vignesh C <vignesh21@gmail.com> wrote:\n>\n> On Fri, Sep 24, 2021 at 6:55 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> >\n> > On 2021-Sep-23, Amit Kapila wrote:\n> >\n> > > Alvaro, do you have any thoughts on these proposed grammar changes?\n> >\n> > Yeah, I think pubobj_name remains a problem in that you don't know its\n> > return type -- could be a String or a RangeVar, and the user of that\n> > production can't distinguish. 
So you're still (unnecessarily, IMV)\n> > stashing an object of undetermined type into ->object.\n> >\n> > I think you should get rid of both pubobj_name and pubobj_expr and do\n> > somethine like this:\n> >\n> > /* FOR TABLE and FOR ALL TABLES IN SCHEMA specifications */\n> > PublicationObjSpec: TABLE ColId\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->pubobjtype = PUBLICATIONOBJ_TABLE;\n> > $$->rangevar = makeRangeVarFromQualifiedName($1, NULL, @1, yyscanner);\n> > }\n> > | TABLE ColId indirection\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->pubobjtype = PUBLICATIONOBJ_TABLE;\n> > $$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n> > }\n> > | ALL TABLES IN_P SCHEMA ColId\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->pubobjtype = PUBLICATIONOBJ_REL_IN_SCHEMA;\n> > $$->name = $4;\n> > }\n> > | ALL TABLES IN_P SCHEMA CURRENT_SCHEMA /* XXX should this be \"IN_P CURRENT_SCHEMA\"? */\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->pubobjtype = PUBLICATIONOBJ_CURRSCHEMA;\n> > $$->name = $4;\n> > }\n> > | ColId\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->name = $1;\n> > $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n> > }\n> > | ColId indirection\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n> > $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n> > }\n> > | CURRENT_SCHEMA\n> > {\n> > $$ = makeNode(PublicationObjSpec);\n> > $$->pubobjtype = PUBLICATIONOBJ_CURRSCHEMA;\n> > }\n> > ;\n>\n> Apart from the issue that Hou San pointed, I found one issue with\n> introduction of PUBLICATIONOBJ_CURRSCHEMA, I was not able to\n> differentiate if it is table or schema in the following cases:\n> CREATE PUBLICATION pub1 FOR ALL TABLES IN SCHEMA CURRENT_SCHEMA;\n> CREATE PUBLICATION pub1 FOR ALL TABLES IN SCHEMA sch1, CURRENT_SCHEMA;\n> CREATE PUBLICATION pub1 FOR table t1, CURRENT_SCHEMA;\n> The differentiation is required to differentiate and 
add a schema or a table.\n>\n\nI am not sure what makes you say that we can't distinguish the above\ncases when there is already a separate rule for CURRENT_SCHEMA? I\nthink you can distinguish by tracking the previous objects as we are\nalready doing in the patch. But one thing that is not clear to me is\nis the reason to introduce a new type PUBLICATIONOBJ_CURRSCHEMA when\nwe use PUBLICATIONOBJ_REL_IN_SCHEMA and PUBLICATIONOBJ_CONTINUATION to\ndistinguish all cases of CURRENT_SCHEMA. Alvaro might have something\nin mind for this which is not apparent and that might have caused\nconfusion to you as well?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 27 Sep 2021 16:40:50 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 27, 2021 at 4:41 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Sat, Sep 25, 2021 at 1:15 PM vignesh C <vignesh21@gmail.com> wrote:\n> >\n> > On Fri, Sep 24, 2021 at 6:55 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > >\n> > > On 2021-Sep-23, Amit Kapila wrote:\n> > >\n> > > > Alvaro, do you have any thoughts on these proposed grammar changes?\n> > >\n> > > Yeah, I think pubobj_name remains a problem in that you don't know its\n> > > return type -- could be a String or a RangeVar, and the user of that\n> > > production can't distinguish. 
So you're still (unnecessarily, IMV)\n> > > stashing an object of undetermined type into ->object.\n> > >\n> > > I think you should get rid of both pubobj_name and pubobj_expr and do\n> > > somethine like this:\n> > >\n> > > /* FOR TABLE and FOR ALL TABLES IN SCHEMA specifications */\n> > > PublicationObjSpec: TABLE ColId\n> > > {\n> > > $$ = makeNode(PublicationObjSpec);\n> > > $$->pubobjtype = PUBLICATIONOBJ_TABLE;\n> > > $$->rangevar = makeRangeVarFromQualifiedName($1, NULL, @1, yyscanner);\n> > > }\n> > > | TABLE ColId indirection\n> > > {\n> > > $$ = makeNode(PublicationObjSpec);\n> > > $$->pubobjtype = PUBLICATIONOBJ_TABLE;\n> > > $$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n> > > }\n> > > | ALL TABLES IN_P SCHEMA ColId\n> > > {\n> > > $$ = makeNode(PublicationObjSpec);\n> > > $$->pubobjtype = PUBLICATIONOBJ_REL_IN_SCHEMA;\n> > > $$->name = $4;\n> > > }\n> > > | ALL TABLES IN_P SCHEMA CURRENT_SCHEMA /* XXX should this be \"IN_P CURRENT_SCHEMA\"? */\n> > > {\n> > > $$ = makeNode(PublicationObjSpec);\n> > > $$->pubobjtype = PUBLICATIONOBJ_CURRSCHEMA;\n> > > $$->name = $4;\n> > > }\n> > > | ColId\n> > > {\n> > > $$ = makeNode(PublicationObjSpec);\n> > > $$->name = $1;\n> > > $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n> > > }\n> > > | ColId indirection\n> > > {\n> > > $$ = makeNode(PublicationObjSpec);\n> > > $$->rangevar = makeRangeVarFromQualifiedName($1, $2, @1, yyscanner);\n> > > $$->pubobjtype = PUBLICATIONOBJ_CONTINUATION;\n> > > }\n> > > | CURRENT_SCHEMA\n> > > {\n> > > $$ = makeNode(PublicationObjSpec);\n> > > $$->pubobjtype = PUBLICATIONOBJ_CURRSCHEMA;\n> > > }\n> > > ;\n> >\n> > Apart from the issue that Hou San pointed, I found one issue with\n> > introduction of PUBLICATIONOBJ_CURRSCHEMA, I was not able to\n> > differentiate if it is table or schema in the following cases:\n> > CREATE PUBLICATION pub1 FOR ALL TABLES IN SCHEMA CURRENT_SCHEMA;\n> > CREATE PUBLICATION pub1 FOR ALL TABLES IN SCHEMA sch1, CURRENT_SCHEMA;\n> > 
CREATE PUBLICATION pub1 FOR table t1, CURRENT_SCHEMA;\n> > The differentiation is required to differentiate and add a schema or a table.\n> >\n>\n> I am not sure what makes you say that we can't distinguish the above\n> cases when there is already a separate rule for CURRENT_SCHEMA? I\n> think you can distinguish by tracking the previous objects as we are\n> already doing in the patch. But one thing that is not clear to me is\n> is the reason to introduce a new type PUBLICATIONOBJ_CURRSCHEMA when\n> we use PUBLICATIONOBJ_REL_IN_SCHEMA and PUBLICATIONOBJ_CONTINUATION to\n> distinguish all cases of CURRENT_SCHEMA. Alvaro might have something\n> in mind for this which is not apparent and that might have caused\n> confusion to you as well?\n\nIt is difficult to identify this case:\n1) create publication pub1 for all tables in schema CURRENT_SCHEMA;\n2) create publication pub1 for CURRENT_SCHEMA;\n\nHere case 1 should succeed and case 2 should throw error:\nSince the object type will be set to PUBLICATIONOBJ_CURRSCHEMA in both\ncases, we cannot differentiate between them:\n1) ALL TABLES IN_P SCHEMA CURRENT_SCHEMA /* XXX should this be \"IN_P\nCURRENT_SCHEMA\"? 
*/\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->pubobjtype =\nPUBLICATIONOBJ_CURRSCHEMA;\n $$->name = $4;\n }\n2) CURRENT_SCHEMA\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->pubobjtype =\nPUBLICATIONOBJ_CURRSCHEMA;\n }\n\nI felt it will work, if we set object type to\nPUBLICATIONOBJ_CONTINUATION in 2nd case(CURRENT_SCHEMA) and setting\nobject type to PUBLICATIONOBJ_REL_IN_SCHEMA or\nPUBLICATIONOBJ_CURRSCHEMA in 1st case( ALL TABLES IN_P SCHEMA\nCURRENT_SCHEMA).\nThoughts?\n\nRegards,\nVignesh\n\n\n", "msg_date": "Mon, 27 Sep 2021 17:13:55 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-27, Amit Kapila wrote:\n\n> I am not sure what makes you say that we can't distinguish the above\n> cases when there is already a separate rule for CURRENT_SCHEMA? I\n> think you can distinguish by tracking the previous objects as we are\n> already doing in the patch. But one thing that is not clear to me is\n> is the reason to introduce a new type PUBLICATIONOBJ_CURRSCHEMA when\n> we use PUBLICATIONOBJ_REL_IN_SCHEMA and PUBLICATIONOBJ_CONTINUATION to\n> distinguish all cases of CURRENT_SCHEMA. Alvaro might have something\n> in mind for this which is not apparent and that might have caused\n> confusion to you as well?\n\nMy issue is what happens if you have a schema that is named\nCURRENT_SCHEMA. In the normal case where you do ALL TABLES IN SCHEMA\n\"CURRENT_SCHEMA\" you would end up with a String containing\n\"CURRENT_SCHEMA\", so how do you distinguish that from ALL TABLES IN\nSCHEMA CURRENT_SCHEMA, which does not refer to the schema named\n\"CURRENT_SCHEMA\" but in Vignesh's proposal also uses a String containing\n\"CURRENT_SCHEMA\"?\n\nNow you could say \"but who would be stupid enough to do that??!\", but it\nseems easier to dodge the problem entirely. 
AFAICS our grammar never\nuses String \"CURRENT_SCHEMA\" to represent CURRENT_SCHEMA, but rather\nsome special enum value.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Mon, 27 Sep 2021 09:23:49 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\n\n\n>\n> I don't think there is one. I think the latest is what I posted in\n> https://postgr.es/m/202109061751.3qz5xpugwx6w@alvherre.pgsql (At least I\n> don't see any reply from Rahila with attachments after that), but that\n> wasn't addressing a bunch of review comments that had been made; and I\n> suspect that Amit K has already committed a few conflicting patches\n> after that.\n>\n> Yes, the v5 version of the patch attached by Alvaro is the latest one.\nIIUC, the review comments that are yet to be addressed apart from the\nongoing grammar\ndiscussion, are as follows:\n\n1. Behaviour on dropping a column from the table, that is a part of column\nfilter.\nIn the latest patch, the entire table is dropped from publication on\ndropping a column\nthat is a part of the column filter. However, there is preference for\nanother approach\nto drop just the column from the filter on DROP column CASCADE(continue to\nfilter\nthe other columns), and an error for DROP RESTRICT.\n\n2. Instead of WITH RECURSIVE query to find the topmost parent of the\npartition\nin fetch_remote_table_info, use pg_partition_tree and pg_partition_root.\n\n3. Report of memory leakage in get_rel_sync_entry().\n\n4. Missing documentation\n\n5. Latest comments(last two messages) by Peter Smith.\n\nThank you,\nRahila Syed\n\nHi, \nI don't think there is one.  
I think the latest is what I posted in\nhttps://postgr.es/m/202109061751.3qz5xpugwx6w@alvherre.pgsql (At least I\ndon't see any reply from Rahila with attachments after that), but that\nwasn't addressing a bunch of review comments that had been made; and I\nsuspect that Amit K has already committed a few conflicting patches\nafter that.\nYes, the v5 version of the patch attached by Alvaro is the latest one. IIUC, the review comments that are yet to be addressed apart from the ongoing grammar discussion, are as follows:1. Behaviour on dropping a column from the table, that is a part of column filter.In the latest patch, the entire table is dropped from publication on dropping a columnthat is a part of the column filter. However, there is preference for another approachto drop just the column from the filter on DROP column CASCADE(continue to filterthe other columns), and an error for DROP RESTRICT. 2. Instead of WITH RECURSIVE query to find the topmost parent of the partitionin fetch_remote_table_info, use pg_partition_tree and pg_partition_root.3. Report of memory leakage in get_rel_sync_entry().4. Missing documentation5. Latest comments(last two messages) by Peter Smith.Thank you,Rahila Syed", "msg_date": "Mon, 27 Sep 2021 18:40:57 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 27, 2021 at 5:53 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-27, Amit Kapila wrote:\n>\n> > I am not sure what makes you say that we can't distinguish the above\n> > cases when there is already a separate rule for CURRENT_SCHEMA? I\n> > think you can distinguish by tracking the previous objects as we are\n> > already doing in the patch. 
But one thing that is not clear to me is\n> > is the reason to introduce a new type PUBLICATIONOBJ_CURRSCHEMA when\n> > we use PUBLICATIONOBJ_REL_IN_SCHEMA and PUBLICATIONOBJ_CONTINUATION to\n> > distinguish all cases of CURRENT_SCHEMA. Alvaro might have something\n> > in mind for this which is not apparent and that might have caused\n> > confusion to you as well?\n>\n> My issue is what happens if you have a schema that is named\n> CURRENT_SCHEMA. In the normal case where you do ALL TABLES IN SCHEMA\n> \"CURRENT_SCHEMA\" you would end up with a String containing\n> \"CURRENT_SCHEMA\", so how do you distinguish that from ALL TABLES IN\n> SCHEMA CURRENT_SCHEMA, which does not refer to the schema named\n> \"CURRENT_SCHEMA\" but in Vignesh's proposal also uses a String containing\n> \"CURRENT_SCHEMA\"?\n>\n> Now you could say \"but who would be stupid enough to do that??!\",\n>\n\nBut it is not allowed to create schema or table with the name\nCURRENT_SCHEMA, so not sure if we need to do anything special for it.\nHowever, if we want to handle it as a separate enum then the handling\nwould be something like:\n\n| ALL TABLES IN_P SCHEMA CURRENT_SCHEMA\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->pubobjtype =\nPUBLICATIONOBJ_CURRSCHEMA;\n }\n...\n...\n| CURRENT_SCHEMA\n {\n $$ =\nmakeNode(PublicationObjSpec);\n $$->pubobjtype =\nPUBLICATIONOBJ_CONTINUATION;\n }\n ;\n\nNow, during post-processing, the PUBLICATIONOBJ_CONTINUATION will be\ndistinguished as CURRENT_SCHEMA because both rangeVar and name will be\nNULL. Do you have other ideas to deal with it? 
Vignesh has already\npoint in his email [1] why we can't keep pubobjtype as\nPUBLICATIONOBJ_CURRSCHEMA in the second case, so I used\nPUBLICATIONOBJ_CONTINUATION.\n\n[1] - https://www.postgresql.org/message-id/CALDaNm06shp%2BALwC2s-dV-S4k2o6bcmXnXGX4ETkoXxKHQfjfA%40mail.gmail.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 28 Sep 2021 08:59:51 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 27, 2021 at 6:41 PM Rahila Syed <rahilasyed90@gmail.com> wrote:\n>\n>>\n>>\n>> I don't think there is one. I think the latest is what I posted in\n>> https://postgr.es/m/202109061751.3qz5xpugwx6w@alvherre.pgsql (At least I\n>> don't see any reply from Rahila with attachments after that), but that\n>> wasn't addressing a bunch of review comments that had been made; and I\n>> suspect that Amit K has already committed a few conflicting patches\n>> after that.\n>>\n> Yes, the v5 version of the patch attached by Alvaro is the latest one.\n> IIUC, the review comments that are yet to be addressed apart from the ongoing grammar\n> discussion, are as follows:\n>\n> 1. Behaviour on dropping a column from the table, that is a part of column filter.\n> In the latest patch, the entire table is dropped from publication on dropping a column\n> that is a part of the column filter. However, there is preference for another approach\n> to drop just the column from the filter on DROP column CASCADE(continue to filter\n> the other columns), and an error for DROP RESTRICT.\n>\n\nI am not sure if we can do this as pointed by me in one of the\nprevious emails [1]. 
I think additionally, you might want to take some\naction if the replica identity is changed as requested in the same\nemail [1].\n\n[1] - https://www.postgresql.org/message-id/CAA4eK1KCGF43pfLv8%2BmixcTMs%3DNkd6YdWL53LhiT1DvnuTg01g%40mail.gmail.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 28 Sep 2021 09:31:33 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-28, Amit Kapila wrote:\n\n> But it is not allowed to create schema or table with the name\n> CURRENT_SCHEMA, so not sure if we need to do anything special for it.\n\nOh? You certainly can.\n\nalvherre=# create schema \"CURRENT_SCHEMA\";\nCREATE SCHEMA\nalvherre=# \\dn\n Listado de esquemas\n Nombre | Dueño \n----------------+-------------------\n CURRENT_SCHEMA | alvherre\n public | pg_database_owner\n temp | alvherre\n(3 filas)\n\nalvherre=# create table \"CURRENT_SCHEMA\".\"CURRENT_SCHEMA\" (\"bother amit for a while\" int);\nCREATE TABLE\nalvherre=# \\d \"CURRENT_SCHEMA\".*\n Tabla «CURRENT_SCHEMA.CURRENT_SCHEMA»\n Columna | Tipo | Ordenamiento | Nulable | Por omisión \n-------------------------+---------+--------------+---------+-------------\n bother amit for a while | integer | | | \n\n\n> Now, during post-processing, the PUBLICATIONOBJ_CONTINUATION will be\n> distinguished as CURRENT_SCHEMA because both rangeVar and name will be\n> NULL. Do you have other ideas to deal with it?\n\nThat sounds plausible. There's no need for a name-free object of any other\nkind AFAICS, so there should be no conflict. 
If we ever do find a\nconflict, we can add another struct member to disambiguate.\n\nThanks\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"Doing what he did amounts to sticking his fingers under the hood of the\nimplementation; if he gets his fingers burnt, it's his problem.\" (Tom Lane)\n\n\n", "msg_date": "Wed, 29 Sep 2021 10:19:35 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Sep 29, 2021 at 6:49 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-28, Amit Kapila wrote:\n>\n> > But it is not allowed to create schema or table with the name\n> > CURRENT_SCHEMA, so not sure if we need to do anything special for it.\n>\n> Oh? You certainly can.\n>\n> alvherre=# create schema \"CURRENT_SCHEMA\";\n> CREATE SCHEMA\n> alvherre=# \\dn\n> Listado de esquemas\n> Nombre | Dueño\n> ----------------+-------------------\n> CURRENT_SCHEMA | alvherre\n> public | pg_database_owner\n> temp | alvherre\n> (3 filas)\n>\n> alvherre=# create table \"CURRENT_SCHEMA\".\"CURRENT_SCHEMA\" (\"bother amit for a while\" int);\n> CREATE TABLE\n> alvherre=# \\d \"CURRENT_SCHEMA\".*\n> Tabla «CURRENT_SCHEMA.CURRENT_SCHEMA»\n> Columna | Tipo | Ordenamiento | Nulable | Por omisión\n> -------------------------+---------+--------------+---------+-------------\n> bother amit for a while | integer | | |\n>\n\noops, I was trying without quotes.\n\n>\n> > Now, during post-processing, the PUBLICATIONOBJ_CONTINUATION will be\n> > distinguished as CURRENT_SCHEMA because both rangeVar and name will be\n> > NULL. Do you have other ideas to deal with it?\n>\n> That sounds plausible. There's no need for a name-free object of any other\n> kind AFAICS, so there should be no conflict. If we ever do find a\n> conflict, we can add another struct member to disambiguate.\n>\n\nOkay, thanks. 
I feel now we are in agreement on the grammar rules.\n\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 30 Sep 2021 16:39:41 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi\n\nI took the latest posted patch, rebased on current sources, fixed the\nconflicts, and pgindented. No further changes. Here's the result. All\ntests are passing for me. Some review comments that were posted have\nnot been addressed yet; I'll look into that soon.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Java is clearly an example of money oriented programming\" (A. Stepanov)", "msg_date": "Wed, 1 Dec 2021 17:29:47 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Oh, I just noticed that for some reason the test file was lost in the\nrebase, so those tests I thought I was running ... I wasn't. And of\ncourse if I put it back, it fails.\n\nMore later.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"Crear es tan difícil como ser libre\" (Elsa Triolet)\n\n\n", "msg_date": "Wed, 1 Dec 2021 17:47:07 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-01, Alvaro Herrera wrote:\n\n> Hi\n> \n> I took the latest posted patch, rebased on current sources, fixed the\n> conflicts, and pgindented. No further changes. Here's the result. All\n> tests are passing for me. 
Some review comments that were posted have\n> not been addressed yet; I'll look into that soon.\n\nIn v7 I have reinstated the test file and fixed the silly problem that\ncaused it to fail (probably a mistake of mine while rebasing).\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\nMaybe there's lots of data loss but the records of data loss are also lost.\n(Lincoln Yeoh)", "msg_date": "Thu, 2 Dec 2021 11:23:58 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-16, Peter Smith wrote:\n\n> I noticed that the latest v5 no longer includes the TAP test which was\n> in the v4 patch.\n> \n> (src/test/subscription/t/021_column_filter.pl)\n> \n> Was that omission deliberate?\n\nSomehow I not only failed to notice the omission, but also your email\nwhere you told us about it. I have since posted a version of the patch\nthat again includes it.\n\nThanks!\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"No renuncies a nada. No te aferres a nada.\"\n\n\n", "msg_date": "Thu, 2 Dec 2021 16:15:03 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Dec 3, 2021 at 12:45 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Sep-16, Peter Smith wrote:\n>\n> > I noticed that the latest v5 no longer includes the TAP test which was\n> > in the v4 patch.\n> >\n> > (src/test/subscription/t/021_column_filter.pl)\n> >\n> > Was that omission deliberate?\n>\n> Somehow I not only failed to notice the omission, but also your email\n> where you told us about it. 
I have since posted a version of the patch\n> that again includes it.\n\nThanks for the patch, Few comments:\nI had a look at the patch, I felt the following should be handled:\n1) Dump changes to include the column filters while adding table to\npublication in dumpPublicationTable\n2) Documentation changes for column filtering in create_publication.sgml\n3) describe publication changes to support \\dRp command in describePublications\n4) I felt we need not allow specifying columns in case of \"alter\npublication drop table\" as currently dropping column filter is not\nallowed.\n5) We should check if the column specified is present in the table,\ncurrently we are able to specify non existent column for column\nfiltering\n+ foreach(lc, targetrel->columns)\n+ {\n+ char *colname;\n+\n+ colname = strVal(lfirst(lc));\n+ target_cols = lappend(target_cols, colname);\n+ }\n+ check_publication_add_relation(targetrel->relation, target_cols);\n\nRegards,\nVignesh\n\n\n", "msg_date": "Fri, 3 Dec 2021 14:47:39 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 02.12.21 15:23, Alvaro Herrera wrote:\n>> I took the latest posted patch, rebased on current sources, fixed the\n>> conflicts, and pgindented. No further changes. Here's the result. All\n>> tests are passing for me. Some review comments that were posted have\n>> not been addressed yet; I'll look into that soon.\n> \n> In v7 I have reinstated the test file and fixed the silly problem that\n> caused it to fail (probably a mistake of mine while rebasing).\n\nI looked through this a bit. You had said that you are still going to \nintegrate past review comments, so I didn't look to deeply before you \nget to that.\n\nAttached are a few fixup patches that you could integrate.\n\nThere was no documentation, so I wrote a bit (patch 0001). 
It only \ntouches the CREATE PUBLICATION and ALTER PUBLICATION pages at the \nmoment. There was no mention in the Logical Replication chapter that \nwarranted updating. Perhaps we should revisit that chapter at the end \nof the release cycle.\n\nDDL tests should be done in src/test/regress/sql/publication.sql rather \nthan through TAP tests, to keep it simpler. I have added a few that I \ncame up with (patch 0002). Note the FIXME marker that it does not \nrecognize if the listed columns don't exist. I removed a now redundant \ntest from the TAP test file. The other error condition test in the TAP \ntest file ('publication relation test_part removed') I didn't \nunderstand: test_part was added with columns (a, b), so why would \ndropping column b remove the whole entry? Maybe I missed something, or \nthis could be explained better.\n\nI was curious what happens when you have different publications with \ndifferent column lists, so I wrote a test for that (patch 0003). It \nturns out it works, so there is nothing to do, but perhaps the test is \nuseful to keep.\n\nThe test file 021_column_filter.pl should be renamed to an unused number \n(would be 027 currently). Also, it contains references to \"TRUNCATE\", \nwhere it was presumably copied from.\n\nOn the implementation side, I think the added catalog column \npg_publication_rel.prattrs should be an int2 array, not a text array. \nThat would also fix the above problem. If you have to look up the \ncolumns at DDL time, then you will notice when they don't exist.\n\nFinally, I suggest not naming this feature \"column filter\". I think \nthis name arose because of the analogy with the \"row filter\" feature \nalso being developed. But a filter is normally a dynamic data-driven \naction, which this is not. 
Golden Gate calls it in their documentation \n\"Selecting Columns\", or we could just call it \"column list\".", "msg_date": "Fri, 10 Dec 2021 14:08:52 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-10, Peter Eisentraut wrote:\n\n> I looked through this a bit. You had said that you are still going to\n> integrate past review comments, so I didn't look to deeply before you get to\n> that.\n\nThanks for doing this! As it happens I've spent the last couple of days\nworking on some of these details.\n\n> There was no documentation, so I wrote a bit (patch 0001). It only touches\n> the CREATE PUBLICATION and ALTER PUBLICATION pages at the moment. There was\n> no mention in the Logical Replication chapter that warranted updating.\n> Perhaps we should revisit that chapter at the end of the release cycle.\n\nThanks. I hadn't looked at the docs yet, so I'll definitely take this.\n\n> DDL tests should be done in src/test/regress/sql/publication.sql rather than\n> through TAP tests, to keep it simpler.\n\nYeah, I noticed this too but hadn't done it yet.\n\n> Note the FIXME marker that it does not recognize if the\n> listed columns don't exist.\n\nI had fixed this already, so I suppose it should be okay.\n\n> I removed a now redundant test from the TAP\n> test file. The other error condition test in the TAP test file\n> ('publication relation test_part removed') I didn't understand: test_part\n> was added with columns (a, b), so why would dropping column b remove the\n> whole entry? Maybe I missed something, or this could be explained better.\n\nThere was some discussion about it earlier in the thread and I was also\nagainst this proposed behavior.\n\n> I was curious what happens when you have different publications with\n> different column lists, so I wrote a test for that (patch 0003). 
It turns\n> out it works, so there is nothing to do, but perhaps the test is useful to\n> keep.\n\nGreat, thanks. Yes, I think it will be.\n\n> On the implementation side, I think the added catalog column\n> pg_publication_rel.prattrs should be an int2 array, not a text array.\n\nI already rewrote it to use a int2vector column in pg_publication_rel.\nThis interacted badly with the previous behavior on dropping columns,\nwhich I have to revisit, but otherwise it seems much better.\n(Particularly since we don't need to care about quoting names and such.)\n\n> Finally, I suggest not naming this feature \"column filter\". I think this\n> name arose because of the analogy with the \"row filter\" feature also being\n> developed. But a filter is normally a dynamic data-driven action, which\n> this is not. Golden Gate calls it in their documentation \"Selecting\n> Columns\", or we could just call it \"column list\".\n\nHmm, I hadn't thought of renaming the feature, but I have to admit that\nI was confused because of the name, so I agree with choosing some other\nname.\n\nI'll integrate your changes and post the whole thing later.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\nSi no sabes adonde vas, es muy probable que acabes en otra parte.\n\n\n", "msg_date": "Fri, 10 Dec 2021 10:23:51 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Sep-02, Alvaro Herrera wrote:\n\n> On 2021-Sep-02, Rahila Syed wrote:\n> \n> > After thinking about this, I think it is best to remove the entire table\n> > from publication,\n> > if a column specified in the column filter is dropped from the table.\n> \n> Hmm, I think it would be cleanest to give responsibility to the user: if\n> the column to be dropped is in the filter, then raise an error, aborting\n> the drop. 
Then it is up to them to figure out what to do.\n\nI thought about this some more and realized that our earlier conclusions\nwere wrong or at least inconvenient. I think that the best behavior if\nyou drop a column from a table is to remove the column from the\npublication column list, and do nothing else.\n\nConsider the case where you add a table to a publication without a\ncolumn filter, and later drop the column. You don't get an error that\nthe relation is part of a publication; simply, the subscribers of that\npublication will no longer receive that column.\n\nSimilarly for this case: if you add a table to a publication with a\ncolumn list, and later drop a column in that list, then you shouldn't\nget an error either. Simply the subscribers of that publication should\nreceive one column less.\n\nShould be fairly quick to implement ... on it now.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"The problem with the facetime model is not just that it's demoralizing, but\nthat the people pretending to work interrupt the ones actually working.\"\n (Paul Graham)\n\n\n", "msg_date": "Fri, 10 Dec 2021 17:02:08 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-10, Alvaro Herrera wrote:\n\n> I thought about this some more and realized that our earlier conclusions\n> were wrong or at least inconvenient. I think that the best behavior if\n> you drop a column from a table is to remove the column from the\n> publication column list, and do nothing else.\n\n> Should be fairly quick to implement ... on it now.\n\nActually it's not so easy to implement.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"No hay ausente sin culpa ni presente sin disculpa\" (Prov. 
francés)\n\n\n", "msg_date": "Fri, 10 Dec 2021 18:08:24 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-10, Alvaro Herrera wrote:\n\n> Actually it's not so easy to implement.\n\nSo I needed to add \"sub object id\" support for pg_publication_rel\nobjects in pg_depend / dependency.c. What I have now is partial (the\ndescribe routines need patched) but it's sufficient to show what's\nneeded. In essence, we now set these depend entries with column\nnumbers, so that they can be dropped independently; when the drop comes,\nthe existing pg_publication_rel row is modified to cover the remaining\ncolumns. As far as I can tell, it works correctly.\n\nThere is one policy decision to make: what if ALTER TABLE drops the last\nremaining column in the publication? I opted to raise a specific error\nin this case, though we could just the same opt to drop the relation\nfrom the publication. Are there opinions on this?\n\nThis version incorporates the fixups Peter submitted, plus some other\nfixes of my own. Notably, as Peter also mentioned, I changed\npg_publication_rel.prattrs to store int2vector rather than an array of\ncolumn names. This makes for better behavior if columns are renamed and\nthings like that, and also we don't need to be so cautious about\nquoting. It does mean we need a slightly more complicated query in a\ncouple of spots, but that should be okay.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"Always assume the user will do much worse than the stupidest thing\nyou can imagine.\" (Julien PUYDT)", "msg_date": "Mon, 13 Dec 2021 14:44:11 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hmm, I messed up the patch file I sent. 
Here's the complete patch.\n\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Doing what he did amounts to sticking his fingers under the hood of the\nimplementation; if he gets his fingers burnt, it's his problem.\" (Tom Lane)", "msg_date": "Mon, 13 Dec 2021 14:47:50 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-13, Alvaro Herrera wrote:\n\n> Hmm, I messed up the patch file I sent. Here's the complete patch.\n\nActually, this requires even a bit more mess than this to be really\ncomplete if we want to be strict about it. The reason is that, with the\npatch I just posted, we're creating a new type of representable object\nthat will need to have some way of making it through pg_identify_object,\npg_get_object_address, pg_identify_object_as_address. This is only\nvisible as one tries to patch object_address.sql (auditability of DDL\noperations being the goal).\n\nI think this means we need a new OBJECT_PUBLICATION_REL_COLUMN value in\nthe ObjectType (paralelling OBJECT_COLUMN), and no new ObjectClass\nvalue. Looking now to confirm.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"El que vive para el futuro es un iluso, y el que vive para el pasado,\nun imbécil\" (Luis Adler, \"Los tripulantes de la noche\")\n\n\n", "msg_date": "Mon, 13 Dec 2021 18:19:54 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-13, Alvaro Herrera wrote:\n\n> I think this means we need a new OBJECT_PUBLICATION_REL_COLUMN value in\n> the ObjectType (paralelling OBJECT_COLUMN), and no new ObjectClass\n> value. Looking now to confirm.\n\nAfter working on this a little bit more, I realized that this is a bad\nidea overall. It causes lots of complications and it's just not worth\nit. 
So I'm back at my original thought that we need to throw an ERROR\nat ALTER TABLE .. DROP COLUMN time if the column is part of a\nreplication column filter, and suggest the user to remove the column\nfrom the filter first and reattempt the DROP COLUMN.\n\nThis means that we need to support changing the column list of a table\nin a publication. I'm looking at implementing some form of ALTER\nPUBLICATION for that.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Find a bug in a program, and fix it, and the program will work today.\nShow the program how to find and fix a bug, and the program\nwill work forever\" (Oliver Silfridge)\n\n\n", "msg_date": "Tue, 14 Dec 2021 13:43:48 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 12/14/21 17:43, Alvaro Herrera wrote:\n> On 2021-Dec-13, Alvaro Herrera wrote:\n> \n>> I think this means we need a new OBJECT_PUBLICATION_REL_COLUMN value in\n>> the ObjectType (paralelling OBJECT_COLUMN), and no new ObjectClass\n>> value. Looking now to confirm.\n> \n> After working on this a little bit more, I realized that this is a bad\n> idea overall. It causes lots of complications and it's just not worth\n> it. So I'm back at my original thought that we need to throw an ERROR\n> at ALTER TABLE .. DROP COLUMN time if the column is part of a\n> replication column filter, and suggest the user to remove the column\n> from the filter first and reattempt the DROP COLUMN.\n> \n> This means that we need to support changing the column list of a table\n> in a publication. I'm looking at implementing some form of ALTER\n> PUBLICATION for that.\n> \n\nYeah. I think it's not clear if this should behave more like an index or \na view. When an indexed column gets dropped we simply drop the index. \nBut if you drop a column referenced by a view, we fail with an error. 
I \nthink we should handle this more like a view, because publications are \nexternally visible objects too (while indexes are pretty much just an \nimplementation detail).\n\nBut why would it be easier not to add new object type? We still need to \ncheck there is no publication referencing the column - either you do \nthat automatically through a dependency, or you do that by custom code. \nUsing a dependency seems better to me, but I don't know what are the \ncomplications you mentioned.\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Tue, 14 Dec 2021 20:28:53 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-14, Tomas Vondra wrote:\n\n> Yeah. I think it's not clear if this should behave more like an index or a\n> view. When an indexed column gets dropped we simply drop the index. But if\n> you drop a column referenced by a view, we fail with an error. I think we\n> should handle this more like a view, because publications are externally\n> visible objects too (while indexes are pretty much just an implementation\n> detail).\n\nI agree -- I think it's more like a view than like an index. (The\noriginal proposal was that if you dropped a column that was part of the\ncolumn list of a relation in a publication, the entire relation is\ndropped from the view, but that doesn't seem very friendly behavior --\nyou break the replication stream immediately if you do that, and the\nonly way to fix it is to send a fresh copy of the remaining subset of\ncolumns.)\n\n> But why would it be easier not to add new object type? We still need to\n> check there is no publication referencing the column - either you do that\n> automatically through a dependency, or you do that by custom code. 
Using a\n> dependency seems better to me, but I don't know what are the complications\n> you mentioned.\n\nThe problem is that we need a way to represent the object \"column of a\ntable in a publication\". I found myself adding a lot of additional code\nto support OBJECT_PUBLICATION_REL_COLUMN and that seemed like too much.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Tue, 14 Dec 2021 16:35:23 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 12/14/21 20:35, Alvaro Herrera wrote:\n> On 2021-Dec-14, Tomas Vondra wrote:\n> \n>> Yeah. I think it's not clear if this should behave more like an index or a\n>> view. When an indexed column gets dropped we simply drop the index. But if\n>> you drop a column referenced by a view, we fail with an error. I think we\n>> should handle this more like a view, because publications are externally\n>> visible objects too (while indexes are pretty much just an implementation\n>> detail).\n> \n> I agree -- I think it's more like a view than like an index. (The\n> original proposal was that if you dropped a column that was part of the\n> column list of a relation in a publication, the entire relation is\n> dropped from the view, but that doesn't seem very friendly behavior --\n> you break the replication stream immediately if you do that, and the\n> only way to fix it is to send a fresh copy of the remaining subset of\n> columns.)\n> \n\nRight, that's my reasoning too.\n\n>> But why would it be easier not to add new object type? We still need to\n>> check there is no publication referencing the column - either you do that\n>> automatically through a dependency, or you do that by custom code. 
Using a\n>> dependency seems better to me, but I don't know what are the complications\n>> you mentioned.\n> \n> The problem is that we need a way to represent the object \"column of a\n> table in a publication\". I found myself adding a lot of additional code\n> to support OBJECT_PUBLICATION_REL_COLUMN and that seemed like too much.\n> \n\nMy experience with dependencies is pretty limited, but can't we simply \nmake a dependency between the whole publication and the column?\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Tue, 14 Dec 2021 22:13:49 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\n\nI went through the v9 patch, and I have a couple comments / questions. \nApologies if some of this was already discussed earlier, it's hard to \ncross-check in such a long thread. Most of the comments are in 0002 to \nmake it easier to locate, and it also makes proposed code changes \nclearer I think.\n\n1) check_publication_add_relation - the \"else\" branch is not really \nneeded, because the \"if (replidentfull)\" always errors-out\n\n2) publication_add_relation has a FIXME about handling cases with \ndifferent column list\n\nSo what's the right behavior for ADD TABLE with different column list? \nI'd say we should allow that, and that it should be mostly the same \nthing as adding/removing columns to the list incrementally, i.e. we \nshould replace the column lists. We could also prohibit such changes, \nbut that seems like a really annoying limitation, forcing people to \nremove/add the relation.\n\nI added some comments to the attmap translation block, and replaced <0 \ncheck with AttrNumberIsForUserDefinedAttr.\n\nBut I wonder if we could get rid of the offset, considering we're \ndealing with just user-defined attributes. 
That'd make the code clearer, \nbut it would break if we're comparing it to other bitmaps with offsets. \nBut I don't think we do.\n\n3) I doubt \"att_map\" is the right name, though. AFAICS it's just a list \nof columns for the relation, not a map, right? So maybe attr_list?\n\n4) AlterPublication talks about \"publication status\" for a column, but \ndo we actually track that? Or what does that mean?\n\n5) PublicationDropTables does a check\n\n if (pubrel->columns)\n ereport(ERROR,\n errcode(ERRCODE_SYNTAX_ERROR),\n\nShouldn't this be prevented by the grammar, really? Also, it should be \nin regression tests.\n\n6) Another thing that should be in the test is partitioned table with \nattribute mapping and column list, to see how map and attr_map interact.\n\n7) There's a couple places doing this\n\n if (att_map != NULL &&\n !bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,\n att_map) &&\n !bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,\n idattrs) &&\n !replidentfull)\n\nwhich is really hard to understand (even if we get rid of the offset), \nso maybe let's move that to a function with sensible name. Also, some \nplaces don't check indattrs - seems a bit suspicious.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Tue, 14 Dec 2021 22:28:11 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tues, Dec 14, 2021 1:48 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\r\n> Hmm, I messed up the patch file I sent. 
Here's the complete patch.\r\n> \r\n\r\nHi,\r\n\r\nI have a minor question about the replica identity check of this patch.\r\n\r\n+check_publication_add_relation(Relation targetrel, Bitmapset *columns)\r\n...\r\n+\t\t\tidattrs = RelationGetIndexAttrBitmap(targetrel,\r\n+\t\t\t\t\t\t\t\t\t\t\t\t INDEX_ATTR_BITMAP_IDENTITY_KEY);\r\n+\t\t\tif (!bms_is_subset(idattrs, columns))\r\n+\t\t\t\tereport(ERROR,\r\n+\t\t\t\t\t\terrcode(ERRCODE_INVALID_COLUMN_REFERENCE),\r\n+\t\t\t\t\t\terrmsg(\"invalid column list for publishing relation \\\"%s\\\"\",\r\n+\t\t\t\t\t\t\t RelationGetRelationName(targetrel)),\r\n+\t\t\t\t\t\terrdetail(\"All columns in REPLICA IDENTITY must be present in the column list.\"));\r\n+\r\n\r\nThe patch ensures all columns of RT are in column list when CREATE/ALTER\r\npublication, but it seems doesn't prevent user from changing the replica\r\nidentity or dropping the index used in replica identity. Do we also need to\r\ncheck those cases ?\r\n\r\nBest regards,\r\nHou zj\r\n", "msg_date": "Thu, 16 Dec 2021 09:28:28 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-16, houzj.fnst@fujitsu.com wrote:\n\n> The patch ensures all columns of RT are in column list when CREATE/ALTER\n> publication, but it seems doesn't prevent user from changing the replica\n> identity or dropping the index used in replica identity. Do we also need to\n> check those cases ?\n\nYes, we do. As it happens, I spent a couple of hours yesterday writing\ncode for that, at least partially. I haven't yet checked what happens\nwith cases like REPLICA NOTHING, or REPLICA INDEX <xyz> and then\ndropping that index.\n\nMy initial ideas were a bit wrong BTW: I thought we should check the\ncombination of column lists in all publications (a bitwise-OR of column\nbitmaps, so to speak). 
But conceptually that's wrong: we need to check\nthe column list of each publication individually instead. Otherwise, if\nyou wanted to hide a column from some publication but that column was\npart of the replica identity, there'd be no way to identify the tuple in\nthe replica. (Or, if the pgouput code disobeys the column list and\nsends the replica identity even if it's not in the column list, then\nyou'd be potentially publishing data that you wanted to hide.)\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Thu, 16 Dec 2021 14:54:59 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-14, Tomas Vondra wrote:\n\n> 7) There's a couple places doing this\n> \n> if (att_map != NULL &&\n> !bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,\n> att_map) &&\n> !bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,\n> idattrs) &&\n> !replidentfull)\n> \n> which is really hard to understand (even if we get rid of the offset), so\n> maybe let's move that to a function with sensible name. Also, some places\n> don't check indattrs - seems a bit suspicious.\n\nIt is indeed pretty hard to read ... but I think this is completely\nunnecessary. Any column that is part of the identity should have been\nincluded in the column filter, so there is no need to check for the\nidentity attributes separately. Testing just for the columns in the\nfilter ought to be sufficient; and the cases \"if att_map NULL\" and \"is\nreplica identity FULL\" are also equivalent, because in the case of FULL,\nyou're disallowed from setting a column list. 
So this whole thing can\nbe reduced to just this:\n\nif (att_map != NULL && !bms_is_member(att->attnum, att_map))\n continue;\t/* that is, don't send this attribute */\n\nso I don't think this merits a separate function.\n\n[ says he, after already trying to write said function ]\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Before you were born your parents weren't as boring as they are now. They\ngot that way paying your bills, cleaning up your room and listening to you\ntell them how idealistic you are.\" -- Charles J. Sykes' advice to teenagers\n\n\n", "msg_date": "Thu, 16 Dec 2021 17:10:58 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Dec 15, 2021 at 1:05 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Dec-14, Tomas Vondra wrote:\n>\n> > Yeah. I think it's not clear if this should behave more like an index or a\n> > view. When an indexed column gets dropped we simply drop the index. But if\n> > you drop a column referenced by a view, we fail with an error. I think we\n> > should handle this more like a view, because publications are externally\n> > visible objects too (while indexes are pretty much just an implementation\n> > detail).\n>\n> I agree -- I think it's more like a view than like an index. (The\n> original proposal was that if you dropped a column that was part of the\n> column list of a relation in a publication, the entire relation is\n> dropped from the view,\n>\n\nI think in the above sentence, you mean to say \"dropped from the\npublication\". So, IIUC, you are proposing that if one drops a column\nthat was part of the column list of a relation in a publication, an\nerror will be raised. Also, if the user specifies CASCADE in Alter\nTable ... Drop Column, then we drop the relation from publication. Is\nthat right? 
BTW, this is somewhat on the lines of what row_filter\npatch is also doing where if the user drops the column that was part\nof row_filter for a relation in publication, we give an error and if\nthe user tries to drop the column with CASCADE then the relation is\nremoved from the publication.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 17 Dec 2021 10:17:33 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 17.12.21 05:47, Amit Kapila wrote:\n> I think in the above sentence, you mean to say \"dropped from the\n> publication\". So, IIUC, you are proposing that if one drops a column\n> that was part of the column list of a relation in a publication, an\n> error will be raised. Also, if the user specifies CASCADE in Alter\n> Table ... Drop Column, then we drop the relation from publication. Is\n> that right? BTW, this is somewhat on the lines of what row_filter\n> patch is also doing where if the user drops the column that was part\n> of row_filter for a relation in publication, we give an error and if\n> the user tries to drop the column with CASCADE then the relation is\n> removed from the publication.\n\nThat looks correct. 
Consider how triggers behave: Dropping a column \nthat a trigger uses (either in UPDATE OF or a WHEN condition) errors \nwith RESTRICT and drops the trigger with CASCADE.\n\n\n\n", "msg_date": "Fri, 17 Dec 2021 08:03:33 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Friday, December 17, 2021 1:55 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\r\n> On 2021-Dec-16, houzj.fnst@fujitsu.com wrote:\r\n> \r\n> > The patch ensures all columns of RT are in column list when\r\n> > CREATE/ALTER publication, but it seems doesn't prevent user from\r\n> > changing the replica identity or dropping the index used in replica\r\n> > identity. Do we also need to check those cases ?\r\n> \r\n> Yes, we do. As it happens, I spent a couple of hours yesterday writing code for\r\n> that, at least partially. I haven't yet checked what happens with cases like\r\n> REPLICA NOTHING, or REPLICA INDEX <xyz> and then dropping that index.\r\n> \r\n> My initial ideas were a bit wrong BTW: I thought we should check the\r\n> combination of column lists in all publications (a bitwise-OR of column bitmaps,\r\n> so to speak). But conceptually that's wrong: we need to check the column list\r\n> of each publication individually instead. Otherwise, if you wanted to hide a\r\n> column from some publication but that column was part of the replica identity,\r\n> there'd be no way to identify the tuple in the replica. 
(Or, if the pgouput code\r\n> disobeys the column list and sends the replica identity even if it's not in the\r\n> column list, then you'd be potentially publishing data that you wanted to hide.)\r\n\r\nThanks for the explanation.\r\n\r\nApart from ALTER REPLICA IDENTITY and DROP INDEX, I think there could be\r\nsome other cases we need to handle for the replica identity check:\r\n\r\n1)\r\nWhen adding a partitioned table with column list to the publication, I think we\r\nneed to check the RI of all its leaf partition. Because the RI on the partition\r\nis the one actually takes effect.\r\n\r\n2)\r\nALTER TABLE ADD PRIMARY KEY;\r\nALTER TABLE DROP CONSTRAINT \"PRIMAEY KEY\";\r\n\r\nIf the replica identity is default, it will use the primary key. we might also\r\nneed to prevent user from adding or removing primary key in this case.\r\n\r\n\r\nBased on the above cases, the RI check seems could bring considerable amount of\r\ncode. So, how about we follow what we already did in CheckCmdReplicaIdentity(),\r\nwe can put the check for RI in that function, so that we can cover all the\r\ncases and reduce the code change. And if we are worried about the cost of do\r\nthe check for UPDATE and DELETE every time, we can also save the result in the\r\nrelcache. It's safe because every operation change the RI will invalidate the\r\nrelcache. We are using this approach in row filter patch to make sure all\r\ncolumns in row filter expression are part of RI.\r\n\r\nBest regards,\r\nHou zj\r\n", "msg_date": "Fri, 17 Dec 2021 09:46:14 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\n\nThank you for updating the patch. The regression tests and tap tests pass\nwith v9 patch.\n\n\n>\n> After working on this a little bit more, I realized that this is a bad\n> idea overall. It causes lots of complications and it's just not worth\n> it. 
So I'm back at my original thought that we need to throw an ERROR\n> at ALTER TABLE .. DROP COLUMN time if the column is part of a\n> replication column filter, and suggest the user to remove the column\n> from the filter first and reattempt the DROP COLUMN.\n>\n> This means that we need to support changing the column list of a table\n> in a publication. I'm looking at implementing some form of ALTER\n> PUBLICATION for that.\n>\n>\nI think right now the patch contains support only for ALTER PUBLICATION..\nADD TABLE with column filters.\nIn order to achieve changing the column lists of a published table, I think\nwe can extend the\nALTER TABLE ..SET TABLE syntax to support specification of column list.\n\nSo this whole thing can\n> be reduced to just this:\n\n\nif (att_map != NULL && !bms_is_member(att->attnum, att_map))\n> continue; /* that is, don't send this attribute */\n\n\nI agree the condition can be shortened now. The long if condition was\nincluded because initially the feature\nallowed specifying filters without replica identity columns(sent those\ncolumns internally without user\nhaving to specify).\n\n 900 + * the table is partitioned. Run a recursive query to iterate\n> through all\n> 901 + * the parents of the partition and retreive the record for\n> the parent\n> 902 + * that exists in pg_publication_rel.\n> 903 + */\n\n\nThe above comment in fetch_remote_table_info() can be changed as the\nrecursive query\nis no longer used.\n\nThank you,\nRahila Syed\n\nHi,Thank you for updating the patch. The regression tests and tap tests pass with v9 patch.\n\nAfter working on this a little bit more, I realized that this is a bad\nidea overall.  It causes lots of complications and it's just not worth\nit.  So I'm back at my original thought that we need to throw an ERROR\nat ALTER TABLE .. 
DROP COLUMN time if the column is part of a\nreplication column filter, and suggest the user to remove the column\nfrom the filter first and reattempt the DROP COLUMN.\n\nThis means that we need to support changing the column list of a table\nin a publication.  I'm looking at implementing some form of ALTER\nPUBLICATION for that.I think right now the patch contains support only for ALTER PUBLICATION.. ADD TABLE with column filters.In order to achieve changing the column lists of a published table, I think we can extend theALTER TABLE ..SET TABLE syntax to support specification of column list.So this whole thing canbe reduced to just this:if (att_map != NULL && !bms_is_member(att->attnum, att_map))       continue;        /* that is, don't send this attribute */I agree the condition can be shortened now. The long if condition was included because initially the featureallowed specifying filters without replica identity columns(sent those columns internally without userhaving to specify). 900 +        * the table is partitioned.  Run a recursive query to iterate through all 901 +        * the parents of the partition and retreive the record for the parent 902 +        * that exists in pg_publication_rel. 903 +        */The above comment in fetch_remote_table_info() can be changed as the recursive queryis no longer used.Thank you,Rahila Syed", "msg_date": "Fri, 17 Dec 2021 17:39:37 +0530", "msg_from": "Rahila Syed <rahilasyed90@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-17, Rahila Syed wrote:\n\n> > This means that we need to support changing the column list of a\n> > table in a publication. I'm looking at implementing some form of\n> > ALTER PUBLICATION for that.\n>\n> I think right now the patch contains support only for ALTER\n> PUBLICATION.. ADD TABLE with column filters. 
In order to achieve\n> changing the column lists of a published table, I think we can extend\n> the ALTER TABLE ..SET TABLE syntax to support specification of column\n> list.\n\nYeah, that's what I was thinking too.\n\n> > So this whole thing can be reduced to just this:\n> \n> > if (att_map != NULL && !bms_is_member(att->attnum, att_map))\n> > continue; /* that is, don't send this attribute */\n> \n> I agree the condition can be shortened now. The long if condition was\n> included because initially the feature allowed specifying filters\n> without replica identity columns(sent those columns internally without\n> user having to specify).\n\nAh, true, I had forgotten that. Thanks.\n\n> > 900 + * the table is partitioned. Run a recursive query to iterate through all\n> > 901 + * the parents of the partition and retreive the record for the parent\n> > 902 + * that exists in pg_publication_rel.\n> > 903 + */\n> \n> The above comment in fetch_remote_table_info() can be changed as the\n> recursive query is no longer used.\n\nOh, of course.\n\nI'll finish some loose ends and submit a v10, but it's still not final.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"Right now the sectors on the hard disk run clockwise, but I heard a rumor that\nyou can squeeze 0.2% more throughput by running them counterclockwise.\nIt's worth the effort. Recommended.\" (Gerry Pourwelle)\n\n\n", "msg_date": "Fri, 17 Dec 2021 12:58:50 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "So I've been thinking about this as a \"security\" item (you can see my\ncomments to that effect sprinkled all over this thread), in the sense\nthat if a publication \"hides\" some column, then the replica just won't\nget access to it. 
But in reality that's mistaken: the filtering that\nthis patch implements is done based on the queries that *the replica*\nexecutes at its own volition; if the replica decides to ignore the list\nof columns, it'll be able to get all columns. All it takes is an\nuncooperative replica in order for the lot of data to be exposed anyway.\n\nIf the server has a *separate* security mechanism to hide the columns\n(per-column privs), it is that feature that will protect the data, not\nthe logical-replication-feature to filter out columns.\n\n\nThis led me to realize that the replica-side code in tablesync.c is\ntotally oblivious to what's the publication through which a table is\nbeing received from in the replica. So we're not aware of a replica\nbeing exposed only a subset of columns through some specific\npublication; and a lot more hacking is needed than this patch does, in\norder to be aware of which publications are being used.\n\nI'm going to have a deeper look at this whole thing.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Fri, 17 Dec 2021 18:07:18 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 12/17/21 22:07, Alvaro Herrera wrote:\n> So I've been thinking about this as a \"security\" item (you can see my\n> comments to that effect sprinkled all over this thread), in the sense\n> that if a publication \"hides\" some column, then the replica just won't\n> get access to it. But in reality that's mistaken: the filtering that\n> this patch implements is done based on the queries that *the replica*\n> executes at its own volition; if the replica decides to ignore the list\n> of columns, it'll be able to get all columns. All it takes is an\n> uncooperative replica in order for the lot of data to be exposed anyway.\n> \n\nInteresting, I haven't really looked at this as a security feature. 
And \nin my experience if something is not carefully designed to be secure \nfrom the get go, it's really hard to add that bit later ...\n\nYou say it's the replica making the decisions, but my mental model is \nit's the publisher decoding the data for a given list of publications \n(which indeed is specified by the subscriber). But the subscriber can't \ntweak the definition of publications, right? Or what do you mean by \nqueries executed by the replica? What are the gap?\n\n> If the server has a *separate* security mechanism to hide the columns\n> (per-column privs), it is that feature that will protect the data, not\n> the logical-replication-feature to filter out columns.\n> \n\nRight. Although I haven't thought about how logical decoding interacts \nwith column privileges. I don't think logical decoding actually checks \ncolumn privileges - I certainly don't recall any ACL checks in \nsrc/backend/replication ...\n\nAFAIK we only really check privileges during initial sync (when creating \nthe slot and copying data), but then we keep replicating data even if \nthe privilege gets revoked for the table/column. In principle the \nreplication role is pretty close to superuser.\n\n> \n> This led me to realize that the replica-side code in tablesync.c is\n> totally oblivious to what's the publication through which a table is\n> being received from in the replica. 
So we're not aware of a replica\n> being exposed only a subset of columns through some specific\n> publication; and a lot more hacking is needed than this patch does, in\n> order to be aware of which publications are being used.\n> \n> I'm going to have a deeper look at this whole thing.\n> \n\nDoes that mean we currently sync all the columns in the initial sync, \nand only start filtering columns later while decoding transactions?\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Fri, 17 Dec 2021 22:57:56 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-17, Tomas Vondra wrote:\n\n> On 12/17/21 22:07, Alvaro Herrera wrote:\n> > So I've been thinking about this as a \"security\" item (you can see my\n> > comments to that effect sprinkled all over this thread), in the sense\n> > that if a publication \"hides\" some column, then the replica just won't\n> > get access to it. But in reality that's mistaken: the filtering that\n> > this patch implements is done based on the queries that *the replica*\n> > executes at its own volition; if the replica decides to ignore the list\n> > of columns, it'll be able to get all columns. All it takes is an\n> > uncooperative replica in order for the lot of data to be exposed anyway.\n> \n> Interesting, I haven't really looked at this as a security feature. And in\n> my experience if something is not carefully designed to be secure from the\n> get go, it's really hard to add that bit later ...\n\nI guess the way to really harden replication is to use the GRANT system\nat the publisher's side to restrict access for the replication user.\nThis would provide actual security. So you're right that I seem to be\nbarking at the wrong tree ... 
maybe I need to give a careful look at\nthe documentation for logical replication to understand what is being\noffered, and to make sure that we explicitly indicate that limiting the\ncolumn list does not provide any actual security.\n\n> You say it's the replica making the decisions, but my mental model is it's\n> the publisher decoding the data for a given list of publications (which\n> indeed is specified by the subscriber). But the subscriber can't tweak the\n> definition of publications, right? Or what do you mean by queries executed\n> by the replica? What are the gap?\n\nI am thinking in somebody modifying the code that the replica runs, so\nthat it ignores the column list that the publication has been configured\nto provide; instead of querying only those columns, it would query all\ncolumns.\n\n> > If the server has a *separate* security mechanism to hide the columns\n> > (per-column privs), it is that feature that will protect the data, not\n> > the logical-replication-feature to filter out columns.\n> \n> Right. Although I haven't thought about how logical decoding interacts with\n> column privileges. I don't think logical decoding actually checks column\n> privileges - I certainly don't recall any ACL checks in\n> src/backend/replication ...\n\nWell, in practice if you're confronted with a replica that's controlled\nby a malicious user that can tweak its behavior, then replica-side\nprivilege checking won't do anything useful.\n\n> > This led me to realize that the replica-side code in tablesync.c is\n> > totally oblivious to what's the publication through which a table is\n> > being received from in the replica. 
So we're not aware of a replica\n> > being exposed only a subset of columns through some specific\n> > publication; and a lot more hacking is needed than this patch does, in\n> > order to be aware of which publications are being used.\n\n> Does that mean we currently sync all the columns in the initial sync, and\n> only start filtering columns later while decoding transactions?\n\nNo, it does filter the list of columns in the initial sync. But the\ncurrent implementation is bogus, because it obtains the list of *all*\npublications in which the table is published, not just the ones that the\nsubscription is configured to get data from. And the sync code doesn't\nreceive the list of publications. We need more thorough patching of the\nsync code to close that hole.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Fri, 17 Dec 2021 22:34:16 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 12/18/21 02:34, Alvaro Herrera wrote:\n> On 2021-Dec-17, Tomas Vondra wrote:\n> \n>> On 12/17/21 22:07, Alvaro Herrera wrote:\n>>> So I've been thinking about this as a \"security\" item (you can see my\n>>> comments to that effect sprinkled all over this thread), in the sense\n>>> that if a publication \"hides\" some column, then the replica just won't\n>>> get access to it. But in reality that's mistaken: the filtering that\n>>> this patch implements is done based on the queries that *the replica*\n>>> executes at its own volition; if the replica decides to ignore the list\n>>> of columns, it'll be able to get all columns. All it takes is an\n>>> uncooperative replica in order for the lot of data to be exposed anyway.\n>>\n>> Interesting, I haven't really looked at this as a security feature. 
And in\n>> my experience if something is not carefully designed to be secure from the\n>> get go, it's really hard to add that bit later ...\n> \n> I guess the way to really harden replication is to use the GRANT system\n> at the publisher's side to restrict access for the replication user.\n> This would provide actual security. So you're right that I seem to be\n> barking at the wrong tree ... maybe I need to give a careful look at\n> the documentation for logical replication to understand what is being\n> offered, and to make sure that we explicitly indicate that limiting the\n> column list does not provide any actual security.\n> \n>> You say it's the replica making the decisions, but my mental model is it's\n>> the publisher decoding the data for a given list of publications (which\n>> indeed is specified by the subscriber). But the subscriber can't tweak the\n>> definition of publications, right? Or what do you mean by queries executed\n>> by the replica? What are the gap?\n> \n> I am thinking in somebody modifying the code that the replica runs, so\n> that it ignores the column list that the publication has been configured\n> to provide; instead of querying only those columns, it would query all\n> columns.\n> \n>>> If the server has a *separate* security mechanism to hide the columns\n>>> (per-column privs), it is that feature that will protect the data, not\n>>> the logical-replication-feature to filter out columns.\n>>\n>> Right. Although I haven't thought about how logical decoding interacts with\n>> column privileges. I don't think logical decoding actually checks column\n>> privileges - I certainly don't recall any ACL checks in\n>> src/backend/replication ...\n> \n> Well, in practice if you're confronted with a replica that's controlled\n> by a malicious user that can tweak its behavior, then replica-side\n> privilege checking won't do anything useful.\n> \n\nI don't follow. Surely the decoding happens on the primary node, right? 
\nWhich is where the ACL checks would happen, using the role the \nreplication connection is opened with.\n\n>>> This led me to realize that the replica-side code in tablesync.c is\n>>> totally oblivious to what's the publication through which a table is\n>>> being received from in the replica. So we're not aware of a replica\n>>> being exposed only a subset of columns through some specific\n>>> publication; and a lot more hacking is needed than this patch does, in\n>>> order to be aware of which publications are being used.\n> \n>> Does that mean we currently sync all the columns in the initial sync, and\n>> only start filtering columns later while decoding transactions?\n> \n> No, it does filter the list of columns in the initial sync. But the\n> current implementation is bogus, because it obtains the list of *all*\n> publications in which the table is published, not just the ones that the\n> subscription is configured to get data from. And the sync code doesn't\n> receive the list of publications. We need more thorough patching of the\n> sync code to close that hole.\n\nAh, got it. Thanks for the explanation. Yeah, that makes no sense.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 18 Dec 2021 02:59:00 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sat, Dec 18, 2021 at 7:04 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Dec-17, Tomas Vondra wrote:\n>\n> > On 12/17/21 22:07, Alvaro Herrera wrote:\n> > > So I've been thinking about this as a \"security\" item (you can see my\n> > > comments to that effect sprinkled all over this thread), in the sense\n> > > that if a publication \"hides\" some column, then the replica just won't\n> > > get access to it. 
But in reality that's mistaken: the filtering that\n> > > this patch implements is done based on the queries that *the replica*\n> > > executes at its own volition; if the replica decides to ignore the list\n> > > of columns, it'll be able to get all columns. All it takes is an\n> > > uncooperative replica in order for the lot of data to be exposed anyway.\n> >\n> > Interesting, I haven't really looked at this as a security feature. And in\n> > my experience if something is not carefully designed to be secure from the\n> > get go, it's really hard to add that bit later ...\n>\n> I guess the way to really harden replication is to use the GRANT system\n> at the publisher's side to restrict access for the replication user.\n> This would provide actual security. So you're right that I seem to be\n> barking at the wrong tree ... maybe I need to give a careful look at\n> the documentation for logical replication to understand what is being\n> offered, and to make sure that we explicitly indicate that limiting the\n> column list does not provide any actual security.\n>\n\nIIRC, the use cases as mentioned by other databases (like Oracle) are\n(a) this helps when the target table doesn't have the same set of\ncolumns or (b) when the columns contain some sensitive information\nlike personal identification number, etc. 
I think there could be a\nside benefit in this which comes from the fact that the lesser data\nwill flow across the network which could lead to faster replication\nespecially when the user filters large column data.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Sat, 18 Dec 2021 09:22:05 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Dec 17, 2021 at 3:16 PM houzj.fnst@fujitsu.com\n<houzj.fnst@fujitsu.com> wrote:\n>\n> On Friday, December 17, 2021 1:55 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > On 2021-Dec-16, houzj.fnst@fujitsu.com wrote:\n> >\n> > > The patch ensures all columns of RT are in column list when\n> > > CREATE/ALTER publication, but it seems doesn't prevent user from\n> > > changing the replica identity or dropping the index used in replica\n> > > identity. Do we also need to check those cases ?\n> >\n> > Yes, we do. As it happens, I spent a couple of hours yesterday writing code for\n> > that, at least partially. I haven't yet checked what happens with cases like\n> > REPLICA NOTHING, or REPLICA INDEX <xyz> and then dropping that index.\n> >\n> > My initial ideas were a bit wrong BTW: I thought we should check the\n> > combination of column lists in all publications (a bitwise-OR of column bitmaps,\n> > so to speak). But conceptually that's wrong: we need to check the column list\n> > of each publication individually instead. Otherwise, if you wanted to hide a\n> > column from some publication but that column was part of the replica identity,\n> > there'd be no way to identify the tuple in the replica. 
(Or, if the pgouput code\n> > disobeys the column list and sends the replica identity even if it's not in the\n> > column list, then you'd be potentially publishing data that you wanted to hide.)\n>\n> Thanks for the explanation.\n>\n> Apart from ALTER REPLICA IDENTITY and DROP INDEX, I think there could be\n> some other cases we need to handle for the replica identity check:\n>\n> 1)\n> When adding a partitioned table with column list to the publication, I think we\n> need to check the RI of all its leaf partition. Because the RI on the partition\n> is the one actually takes effect.\n>\n> 2)\n> ALTER TABLE ADD PRIMARY KEY;\n> ALTER TABLE DROP CONSTRAINT \"PRIMAEY KEY\";\n>\n> If the replica identity is default, it will use the primary key. we might also\n> need to prevent user from adding or removing primary key in this case.\n>\n>\n> Based on the above cases, the RI check seems could bring considerable amount of\n> code. So, how about we follow what we already did in CheckCmdReplicaIdentity(),\n> we can put the check for RI in that function, so that we can cover all the\n> cases and reduce the code change. And if we are worried about the cost of do\n> the check for UPDATE and DELETE every time, we can also save the result in the\n> relcache. It's safe because every operation change the RI will invalidate the\n> relcache. We are using this approach in row filter patch to make sure all\n> columns in row filter expression are part of RI.\n>\n\nAnother point related to RI is that this patch seems to restrict\nspecifying the RI columns in the column filter list irrespective of\npublish action. 
Do we need to have such a restriction if the\npublication publishes 'insert' or 'truncate'?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Sat, 18 Dec 2021 09:37:02 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-18, Tomas Vondra wrote:\n\n> On 12/18/21 02:34, Alvaro Herrera wrote:\n> > On 2021-Dec-17, Tomas Vondra wrote:\n\n> > > > If the server has a *separate* security mechanism to hide the\n> > > > columns (per-column privs), it is that feature that will protect\n> > > > the data, not the logical-replication-feature to filter out\n> > > > columns.\n> > > \n> > > Right. Although I haven't thought about how logical decoding\n> > > interacts with column privileges. I don't think logical decoding\n> > > actually checks column privileges - I certainly don't recall any\n> > > ACL checks in src/backend/replication ...\n> > \n> > Well, in practice if you're confronted with a replica that's\n> > controlled by a malicious user that can tweak its behavior, then\n> > replica-side privilege checking won't do anything useful.\n> \n> I don't follow. Surely the decoding happens on the primary node,\n> right? Which is where the ACL checks would happen, using the role the\n> replication connection is opened with.\n\nI think you do follow. Yes, the decoding happens on the primary node,\nand the security checks should occur in the primary node, because to do\notherwise is folly(*). Which means that column filtering, being a\nreplica-side feature, is *not* a security feature. I was mistaken about\nit, is all. If you want security, you need to use column-level\nprivileges, as you say.\n\n(*) The checks *must* occur in the primary side, because the primary\ndoes not control the code that runs in the replica side. The primary\nmust treat the replica as running potentially hostile code. 
Trying to\ndefend against that is not practical.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Sat, 18 Dec 2021 13:51:56 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 17.12.21 22:07, Alvaro Herrera wrote:\n> So I've been thinking about this as a \"security\" item (you can see my\n> comments to that effect sprinkled all over this thread), in the sense\n> that if a publication \"hides\" some column, then the replica just won't\n> get access to it. But in reality that's mistaken: the filtering that\n> this patch implements is done based on the queries that *the replica*\n> executes at its own volition; if the replica decides to ignore the list\n> of columns, it'll be able to get all columns. All it takes is an\n> uncooperative replica in order for the lot of data to be exposed anyway.\n\nDuring normal replication, the publisher should only send the columns \nthat are configured to be part of the publication. So I don't see a \nproblem there.\n\nDuring the initial table sync, the subscriber indeed can construct any \nCOPY command. We could maybe replace this with a more customized COPY \ncommand variant, like COPY table OF publication TO STDOUT.\n\nBut right now the subscriber is sort of assumed to have access to \neverything on the publisher anyway, so I doubt that this is the only \nproblem. But it's worth considering.\n\n\n", "msg_date": "Mon, 20 Dec 2021 14:53:46 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Determining that an array has a NULL element seems convoluted. I ended\nup with this query, where comparing the result of array_positions() with\nan empty array does that. 
If anybody knows of a simpler way, or any\nsituations in which this fails, I'm all ears.\n\nwith published_cols as (\n select case when\n pg_catalog.array_positions(pg_catalog.array_agg(unnest), null) <> '{}' then null else\n pg_catalog.array_agg(distinct unnest order by unnest) end AS attrs\n from pg_catalog.pg_publication p join\n pg_catalog.pg_publication_rel pr on (p.oid = pr.prpubid) left join\n unnest(prattrs) on (true)\n where prrelid = 38168 and p.pubname in ('pub1', 'pub2')\n)\nSELECT a.attname,\n a.atttypid,\n a.attnum = ANY(i.indkey)\n FROM pg_catalog.pg_attribute a\n LEFT JOIN pg_catalog.pg_index i\n ON (i.indexrelid = pg_get_replica_identity_index(38168)),\n published_cols\n WHERE a.attnum > 0::pg_catalog.int2\n AND NOT a.attisdropped and a.attgenerated = ''\n AND a.attrelid = 38168\n AND (published_cols.attrs IS NULL OR attnum = ANY(published_cols.attrs))\n ORDER BY a.attnum;\n\nThis returns all columns if at least one publication has a NULL prattrs,\nor only the union of columns listed in all publications, if all\npublications have a list of columns.\n\n(I was worried about obtaining the list of publications, but it turns\nout that it's already as a convenient list of OIDs in the MySubscription\nstruct.)\n\nWith this, we can remove the second query added by Rahila's original patch to\nfilter out nonpublished columns.\n\nI still need to add pg_partition_tree() in order to search for\npublications containing a partition ancestor. 
I'm not yet sure what\nhappens (and what *should* happen) if an ancestor is part of a\npublication and the partition is also part of a publication, and the\ncolumn lists differ.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\nAl principio era UNIX, y UNIX habló y dijo: \"Hello world\\n\".\nNo dijo \"Hello New Jersey\\n\", ni \"Hello USA\\n\".\n\n\n", "msg_date": "Mon, 27 Dec 2021 14:06:18 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> Determining that an array has a NULL element seems convoluted. I ended\n> up with this query, where comparing the result of array_positions() with\n> an empty array does that. If anybody knows of a simpler way, or any\n> situations in which this fails, I'm all ears.\n\nMaybe better to rethink why we allow elements of prattrs to be null?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 27 Dec 2021 12:38:29 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-27, Tom Lane wrote:\n\n> Alvaro Herrera <alvherre@alvh.no-ip.org> writes:\n> > Determining that an array has a NULL element seems convoluted. I ended\n> > up with this query, where comparing the result of array_positions() with\n> > an empty array does that. If anybody knows of a simpler way, or any\n> > situations in which this fails, I'm all ears.\n> \n> Maybe better to rethink why we allow elements of prattrs to be null?\n\nWhat I'm doing is an unnest of all arrays and then aggregating them\nback into a single array. If one array is null, the resulting aggregate\ncontains a null element.\n\nHmm, maybe I can in parallel do a bool_or() aggregate of \"array is null\" to\navoid that. ... 
ah yes, that works:\n\nwith published_cols as (\n select pg_catalog.bool_or(pr.prattrs is null) as all_columns,\n pg_catalog.array_agg(distinct unnest order by unnest) AS attrs\n from pg_catalog.pg_publication p join\n pg_catalog.pg_publication_rel pr on (p.oid = pr.prpubid) left join\n unnest(prattrs) on (true)\n where prrelid = :table and p.pubname in ('pub1', 'pub2')\n)\nSELECT a.attname,\n a.atttypid,\n a.attnum = ANY(i.indkey)\n FROM pg_catalog.pg_attribute a\n LEFT JOIN pg_catalog.pg_index i\n ON (i.indexrelid = pg_get_replica_identity_index(:table)),\n published_cols\n WHERE a.attnum > 0::pg_catalog.int2\n AND NOT a.attisdropped and a.attgenerated = ''\n AND a.attrelid = :table\n AND (all_columns OR attnum = ANY(published_cols.attrs))\n ORDER BY a.attnum ;\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Mon, 27 Dec 2021 15:31:46 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "OK, getting closer now. I've fixed the code to filter them column list\nduring the initial sync, and added some more tests for code that wasn't\ncovered.\n\nThere are still some XXX comments. The one that bothers me most is the\nlack of an implementation that allows changing the column list in a\npublication without having to remove the table from the publication\nfirst.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"I'm always right, but sometimes I'm more right than other times.\"\n (Linus Torvalds)", "msg_date": "Tue, 28 Dec 2021 18:04:27 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-28, Alvaro Herrera wrote:\n\n> There are still some XXX comments. 
The one that bothers me most is the\n> lack of an implementation that allows changing the column list in a\n> publication without having to remove the table from the publication\n> first.\n\nOK, I made some progress on this front; I added new forms of ALTER\nPUBLICATION to support it:\n\nALTER PUBLICATION pub1 ALTER TABLE tbl SET COLUMNS (a, b, c);\nALTER PUBLICATION pub1 ALTER TABLE tbl SET COLUMNS ALL;\n\n(not wedded to this syntax; other suggestions welcome)\n\nIn order to implement it I changed the haphazardly chosen use of\nDEFELEM actions to a new enum. I also noticed that the division of\nlabor between pg_publication.c and publicationcmds.c is quite broken\n(code to translate column names to numbers is in the former, should be\nin the latter; some code that deals with pg_publication tuples is in the\nlatter, should be in the former, such as CreatePublication,\nAlterPublicationOptions).\n\nThis new stuff is not yet finished. For example I didn't refactor\nhandling of REPLICA IDENTITY, so the new command does not correctly\ncheck everything, such as the REPLICA IDENTITY FULL stuff. Also, no\ntests have been added yet. In manual tests it seems to behave as\nexpected.\n\nI noticed that prattrs is inserted in user-specified order instead of\ncatalog order, which is innocuous but quite weird.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"No renuncies a nada. No te aferres a nada.\"", "msg_date": "Wed, 29 Dec 2021 21:15:08 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-29, Alvaro Herrera wrote:\n\n> This new stuff is not yet finished. For example I didn't refactor\n> handling of REPLICA IDENTITY, so the new command does not correctly\n> check everything, such as the REPLICA IDENTITY FULL stuff. Also, no\n> tests have been added yet. 
In manual tests it seems to behave as\n> expected.\n\nFixing the lack of check for replica identity full didn't really require\nmuch refactoring, so I did it that way.\n\nI split it with some trivial fixes that can be committed separately\nahead of time. I'm thinking in committing 0001 later today, perhaps\n0002 tomorrow. The interesting part is 0003.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/", "msg_date": "Thu, 30 Dec 2021 17:21:28 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "> +\tbool\t\tam_partition = false;\n>...\n>\tAssert(!isnull);\n>\tlrel->relkind = DatumGetChar(slot_getattr(slot, 3, &isnull));\n>\tAssert(!isnull);\n> +\tam_partition = DatumGetChar(slot_getattr(slot, 4, &isnull));\n\nI think this needs to be GetBool.\nYou should Assert(!isnull) like the others.\nAlso, I think it doesn't need to be initialized to \"false\".\n\n> +\t\t/*\n> +\t\t * Even if the user listed all columns in the column list, we cannot\n> +\t\t * allow a column list to be specified when REPLICA IDENTITY is FULL;\n> +\t\t * that would cause problems if a new column is added later, because\n> +\t\t * that could would have to be included (because of being part of the\n\ncould would is wrong\n\n> +\t/*\n> +\t * Translate list of columns to attnums. We prohibit system attributes and\n> +\t * make sure there are no duplicate columns.\n> +\t *\n> +\t */\n\nextraneous line\n\n> +/*\n> + * Gets a list of OIDs of all column-partial publications of the given\n> + * relation, that is, those that specify a column list.\n\nI would call this a \"partial-column\" publication.\n\n> +\t\t\t\t\terrmsg(\"cannot set REPLICA IDENTITY FULL when column-partial publications exist\"));\n> +\t * Check column-partial publications. 
All publications have to include all\n\nsame\n\n> +\t/*\n> +\t * Store the column names only if they are contained in column filter\n\nperiod(.)\n\n> +\t * LogicalRepRelation will only contain attributes corresponding to those\n> +\t * specficied in column filters.\n\nspecified\n\n> --- a/src/include/catalog/pg_publication_rel.h\n> +++ b/src/include/catalog/pg_publication_rel.h\n> @@ -31,6 +31,9 @@ CATALOG(pg_publication_rel,6106,PublicationRelRelationId)\n> \tOid\t\t\toid;\t\t\t/* oid */\n> \tOid\t\t\tprpubid BKI_LOOKUP(pg_publication); /* Oid of the publication */\n> \tOid\t\t\tprrelid BKI_LOOKUP(pg_class);\t/* Oid of the relation */\n> +#ifdef CATALOG_VARLEN\n> +\tint2vector\tprattrs;\t\t/* Variable length field starts here */\n> +#endif\n\nThe language in the pre-existing comments is better:\n\t/* variable-length fields start here */\n\n> @@ -791,12 +875,13 @@ fetch_remote_table_info(char *nspname, char *relname,\n>\n> ExecClearTuple(slot);\n> }\n> +\n> ExecDropSingleTupleTableSlot(slot);\n> + walrcv_clear_result(res);\n> + pfree(cmd.data);\n>\n> lrel->natts = natt;\n>\n> - walrcv_clear_result(res);\n> - pfree(cmd.data);\n> }\n\nThe blank line after \"lrel->natts = natt;\" should be removed.\n\n\n", "msg_date": "Thu, 30 Dec 2021 16:16:34 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-30, Justin Pryzby wrote:\n\nThank you! 
I've incorporated your proposed fixes.\n\n> > +\t\t/*\n> > +\t\t * Even if the user listed all columns in the column list, we cannot\n> > +\t\t * allow a column list to be specified when REPLICA IDENTITY is FULL;\n> > +\t\t * that would cause problems if a new column is added later, because\n> > +\t\t * that could would have to be included (because of being part of the\n> \n> could would is wrong\n\nHah, yeah, this was \"that column would\".\n\n> > + * Gets a list of OIDs of all column-partial publications of the given\n> > + * relation, that is, those that specify a column list.\n> \n> I would call this a \"partial-column\" publication.\n\nOK, done that way.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/", "msg_date": "Thu, 30 Dec 2021 20:32:29 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "> @@ -5963,8 +5967,20 @@ describePublications(const char *pattern)\n> \t\t{\n> \t\t\t/* Get the tables for the specified publication */\n> \t\t\tprintfPQExpBuffer(&buf,\n> -\t\t\t\t\t\t\t \"SELECT n.nspname, c.relname\\n\"\n> -\t\t\t\t\t\t\t \"FROM pg_catalog.pg_class c,\\n\"\n> +\t\t\t\t\t\t\t \"SELECT n.nspname, c.relname, \\n\");\n> +\t\t\tif (pset.sversion >= 150000)\n> +\t\t\t\tappendPQExpBufferStr(&buf,\n> +\t\t\t\t\t\t\t\t\t \" CASE WHEN pr.prattrs IS NOT NULL THEN\\n\"\n> +\t\t\t\t\t\t\t\t\t \" pg_catalog.array_to_string\"\n> +\t\t\t\t\t\t\t\t\t \"(ARRAY(SELECT attname\\n\"\n> +\t\t\t\t\t\t\t\t\t \" FROM pg_catalog.generate_series(0, pg_catalog.array_upper(pr.prattrs::int[], 1)) s,\\n\"\n> +\t\t\t\t\t\t\t\t\t \" pg_catalog.pg_attribute\\n\"\n> +\t\t\t\t\t\t\t\t\t \" WHERE attrelid = c.oid AND attnum = prattrs[s]), ', ')\\n\"\n> +\t\t\t\t\t\t\t\t\t \" ELSE NULL END AS columns\");\n> +\t\t\telse\n> +\t\t\t\tappendPQExpBufferStr(&buf, \"NULL as columns\");\n> +\t\t\tappendPQExpBuffer(&buf,\n> +\t\t\t\t\t\t\t \"\\nFROM 
pg_catalog.pg_class c,\\n\"\n> \t\t\t\t\t\t\t \" pg_catalog.pg_namespace n,\\n\"\n> \t\t\t\t\t\t\t \" pg_catalog.pg_publication_rel pr\\n\"\n> \t\t\t\t\t\t\t \"WHERE c.relnamespace = n.oid\\n\"\n\nI suppose this should use pr.prattrs::pg_catalog.int2[] ?\n\nDid the DatumGetBool issue expose a deficiency in testing ?\nI think the !am_partition path was never being hit.\n\n-- \nJustin\n\n\n", "msg_date": "Fri, 31 Dec 2021 10:32:27 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2021-Dec-31, Justin Pryzby wrote:\n\n> > @@ -5963,8 +5967,20 @@ describePublications(const char *pattern)\n> > \t\t{\n> > +\t\t\t\t\t\t\t\t\t \" CASE WHEN pr.prattrs IS NOT NULL THEN\\n\"\n> > +\t\t\t\t\t\t\t\t\t \" pg_catalog.array_to_string\"\n> > +\t\t\t\t\t\t\t\t\t \"(ARRAY(SELECT attname\\n\"\n> > +\t\t\t\t\t\t\t\t\t \" FROM pg_catalog.generate_series(0, pg_catalog.array_upper(pr.prattrs::int[], 1)) s,\\n\"\n> > +\t\t\t\t\t\t\t\t\t \" pg_catalog.pg_attribute\\n\"\n> > +\t\t\t\t\t\t\t\t\t \" WHERE attrelid = c.oid AND attnum = prattrs[s]), ', ')\\n\"\n> > +\t\t\t\t\t\t\t\t\t \" ELSE NULL END AS columns\");\n\n> I suppose this should use pr.prattrs::pg_catalog.int2[] ?\n\nTrue. Changed that.\n\nAnother change in this v15 is that I renamed the test file from \".patch\"\nto \".pl\". I suppose I mistyped the extension when renumbering from 021\nto 028.\n\n> Did the DatumGetBool issue expose a deficiency in testing ?\n> I think the !am_partition path was never being hit.\n\nHmm, the TAP test creates a subscription that contains both types of\ntables. 
I tried adding an assert for each case, and they were both hit\non running the test.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"La persona que no quería pecar / estaba obligada a sentarse\n en duras y empinadas sillas / desprovistas, por cierto\n de blandos atenuantes\" (Patricio Vogel)", "msg_date": "Mon, 3 Jan 2022 11:31:39 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Jan 03, 2022 at 11:31:39AM -0300, Alvaro Herrera wrote:\n> > Did the DatumGetBool issue expose a deficiency in testing ?\n> > I think the !am_partition path was never being hit.\n> \n> Hmm, the TAP test creates a subscription that contains both types of\n> tables. I tried adding an assert for each case, and they were both hit\n> on running the test.\n\nYes, I know both paths are hit now that it uses GetBool.\n\nWhat I'm wondering is why tests didn't fail when one path wasn't hit - when it\nsaid am_partition=DatumGetChar(); if (!am_partition){}\n\nI suppose it's because the am_partition=true case correctly handles\nnonpartitions.\n\nMaybe the !am_partition case should be removed, and add a comment that\npg_partition_tree(pg_partition_root(%u))) also handles non-partitions.\nOr maybe that's inefficient...\n\n-- \nJustin\n\n\n", "msg_date": "Mon, 3 Jan 2022 08:40:57 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Jan-03, Justin Pryzby wrote:\n\n> Yes, I know both paths are hit now that it uses GetBool.\n> \n> What I'm wondering is why tests didn't fail when one path wasn't hit - when it\n> said am_partition=DatumGetChar(); if (!am_partition){}\n\nAh!\n\n> I suppose it's because the am_partition=true case correctly handles\n> nonpartitions.\n> \n> Maybe the !am_partition case should be removed, and add a comment 
that\n> pg_partition_tree(pg_partition_root(%u))) also handles non-partitions.\n> Or maybe that's inefficient...\n\nHmm, that doesn't sound true. Running the query manually, you get an\nempty list if you use pg_partition_tree(pg_partition_root) with a\nnon-partition. Maybe what was happening is that all columns were being\ntransmitted instead of only the required columns. Maybe you're right\nthat the test isn't complete enough.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Mon, 3 Jan 2022 12:01:59 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Jan 3, 2022 at 8:01 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n\nfetch_remote_table_info()\n{\n..\n+ appendStringInfo(&cmd,\n+ \" SELECT pg_catalog.unnest(prattrs)\\n\"\n+ \" FROM pg_catalog.pg_publication p JOIN\\n\"\n+ \" pg_catalog.pg_publication_rel pr ON (p.oid = pr.prpubid)\\n\"\n+ \" WHERE p.pubname IN (%s) AND\\n\",\n+ publications.data);\n+ if (!am_partition)\n+ appendStringInfo(&cmd, \"prrelid = %u\", lrel->remoteid);\n+ else\n+ appendStringInfo(&cmd,\n+ \"prrelid IN (SELECT relid\\n\"\n+ \" FROM pg_catalog.pg_partition_tree(pg_catalog.pg_partition_root(%u)))\",\n+ lrel->remoteid);\n\nIIUC, this doesn't deal with cases when some publication has not\nspecified table attrs. In those cases, I think it should return all\nattrs? Also, it is not very clear to me what exactly we want to do\nwith partitions?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 6 Jan 2022 14:07:33 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Dec 27, 2021 at 10:36 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> Determining that an array has a NULL element seems convoluted. 
I ended\n> up with this query, where comparing the result of array_positions() with\n> an empty array does that. If anybody knows of a simpler way, or any\n> situations in which this fails, I'm all ears.\n>\n> with published_cols as (\n> select case when\n> pg_catalog.array_positions(pg_catalog.array_agg(unnest), null) <> '{}' then null else\n> pg_catalog.array_agg(distinct unnest order by unnest) end AS attrs\n> from pg_catalog.pg_publication p join\n> pg_catalog.pg_publication_rel pr on (p.oid = pr.prpubid) left join\n> unnest(prattrs) on (true)\n> where prrelid = 38168 and p.pubname in ('pub1', 'pub2')\n> )\n> SELECT a.attname,\n> a.atttypid,\n> a.attnum = ANY(i.indkey)\n> FROM pg_catalog.pg_attribute a\n> LEFT JOIN pg_catalog.pg_index i\n> ON (i.indexrelid = pg_get_replica_identity_index(38168)),\n> published_cols\n> WHERE a.attnum > 0::pg_catalog.int2\n> AND NOT a.attisdropped and a.attgenerated = ''\n> AND a.attrelid = 38168\n> AND (published_cols.attrs IS NULL OR attnum = ANY(published_cols.attrs))\n> ORDER BY a.attnum;\n>\n> This returns all columns if at least one publication has a NULL prattrs,\n> or only the union of columns listed in all publications, if all\n> publications have a list of columns.\n>\n\nConsidering this, don't we need to deal with \"For All Tables\" and \"For\nAll Tables In Schema ..\" Publications in this query? The row filter\npatch deal with such cases. The row filter patch handles the NULL case\nvia C code which makes the query relatively simpler. 
I am not sure if\nthe same logic can be used here but having a simple query here have\nmerit that if we want to use a single query to fetch both column and\nrow filters then we should be able to enhance it without making it\nfurther complicated.\n\n> (I was worried about obtaining the list of publications, but it turns\n> out that it's already as a convenient list of OIDs in the MySubscription\n> struct.)\n>\n> With this, we can remove the second query added by Rahila's original patch to\n> filter out nonpublished columns.\n>\n> I still need to add pg_partition_tree() in order to search for\n> publications containing a partition ancestor. I'm not yet sure what\n> happens (and what *should* happen) if an ancestor is part of a\n> publication and the partition is also part of a publication, and the\n> column lists differ.\n>\n\nShouldn't we try to have a behavior similar to the row filter patch\nfor this case? The row filter patch behavior is as follows: \"If your\npublication contains a partitioned table, the publication parameter\npublish_via_partition_root determines if it uses the partition row\nfilter (if the parameter is false, the default) or the root\npartitioned table row filter. 
During initial tablesync, it doesn't do\nany special handling for partitions.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 6 Jan 2022 15:11:30 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Jan-06, Amit Kapila wrote:\n\n> On Mon, Jan 3, 2022 at 8:01 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> >\n> \n> fetch_remote_table_info()\n> {\n> ..\n> + appendStringInfo(&cmd,\n> + \" SELECT pg_catalog.unnest(prattrs)\\n\"\n> + \" FROM pg_catalog.pg_publication p JOIN\\n\"\n> + \" pg_catalog.pg_publication_rel pr ON (p.oid = pr.prpubid)\\n\"\n> + \" WHERE p.pubname IN (%s) AND\\n\",\n> + publications.data);\n> + if (!am_partition)\n> + appendStringInfo(&cmd, \"prrelid = %u\", lrel->remoteid);\n> + else\n> + appendStringInfo(&cmd,\n> + \"prrelid IN (SELECT relid\\n\"\n> + \" FROM pg_catalog.pg_partition_tree(pg_catalog.pg_partition_root(%u)))\",\n> + lrel->remoteid);\n> \n> IIUC, this doesn't deal with cases when some publication has not\n> specified table attrs. In those cases, I think it should return all\n> attrs?\n\nHmm, no, the idea here is that the list of columns should be null; the\ncode that uses this result is supposed to handle a null result to mean\nhat all columns are included.\n\n> Also, it is not very clear to me what exactly we want to do\n> with partitions?\n\n... Hmm, maybe there is a gap in testing here, I'll check; but the idea\nis that we would use the column list of the most immediate ancestor that\nhas one, if the partition itself doesn't have one. (I see we're missing\na check for \"pubviaroot\", which should represent an override. 
Need more\ntests here.)\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Uno puede defenderse de los ataques; contra los elogios se esta indefenso\"\n\n\n", "msg_date": "Thu, 6 Jan 2022 09:22:54 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Jan-06, Amit Kapila wrote:\n\n> Considering this, don't we need to deal with \"For All Tables\" and \"For\n> All Tables In Schema ..\" Publications in this query? The row filter\n> patch deal with such cases. The row filter patch handles the NULL case\n> via C code which makes the query relatively simpler.\n\nYes. I realized after sending that email that the need to handle schema\npublications would make a single query very difficult, so I ended up\nsplitting it again in two queries, which is what you see in the latest\nversion submitted.\n\n> I am not sure if the same logic can be used here but having a simple\n> query here have merit that if we want to use a single query to fetch\n> both column and row filters then we should be able to enhance it\n> without making it further complicated.\n\nI have looked the row filter code a couple of times to make sure we're\nsomewhat compatible, but didn't look closely enough to see if we can\nmake the queries added by both patches into a single one.\n\n> Shouldn't we try to have a behavior similar to the row filter patch\n> for this case? The row filter patch behavior is as follows: \"If your\n> publication contains a partitioned table, the publication parameter\n> publish_via_partition_root determines if it uses the partition row\n> filter (if the parameter is false, the default) or the root\n> partitioned table row filter. 
During initial tablesync, it doesn't do\n> any special handling for partitions.\n\nI'll have a look.\n\nThanks for looking!\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Thu, 6 Jan 2022 09:29:31 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "I think this is getting pretty good now. I like the overall behavior now.\n\nSome details:\n\nThere are still a few references to \"filter\", but I see most of the\npatch now uses column list or something. Maybe do another cleanup\npass before finalizing the patch.\n\ndoc/src/sgml/catalogs.sgml needs to be updated.\n\ndoc/src/sgml/ref/alter_publication.sgml:\n\n\"allows to change\" -> \"allows changing\"\n\nsrc/backend/catalog/pg_publication.c:\n\npublication_translate_columns(): I find the style of having a couple\nof output arguments plus a return value that is actually another\noutput value confusing. (It would be different if the return value\nwas some kind of success value.) Let's make it all output arguments.\n\nAbout the XXX question there: I would make the column numbers always\nsorted. I don't have a strong reason for this, but otherwise we might\nget version differences, unstable dumps etc. It doesn't seem\ncomplicated to keep this a bit cleaner.\n\nI think publication_translate_columns() also needs to prohibit\ngenerated columns. We already exclude those implicitly throughout the\nlogical replication code, but if a user explicitly set one here,\nthings would probably break.\n\nsrc/backend/commands/tablecmds.c:\n\nATExecReplicaIdentity(): Regarding the question of how to handle\nREPLICA_IDENTITY_NOTHING: I see two ways to do this. 
Right now, the\napproach is that the user can set the replica identity freely, and we\ndecide later based on that what we can replicate (e.g., no updates).\nFor this patch, that would mean we don't restrict what columns can be\nin the column list, but we check what actions we can replicate based\non the column list. The alternative is that we require the column\nlist to include the replica identity, as the patch is currently doing,\nwhich would mean that REPLICA_IDENTITY_NOTHING can be allowed since\nit's essentially a set of zero columns.\n\nI find the current behavior a bit weird on reflection. If a user\nwants to replicate on some columns and only INSERTs, that should be\nallowed regardless of what the replica identity columns are.\n\nsrc/backend/replication/pgoutput/pgoutput.c:\n\nIn get_rel_sync_entry(), why did you remove the block\n\n- if (entry->pubactions.pubinsert && \nentry->pubactions.pubupdate &&\n- entry->pubactions.pubdelete && \nentry->pubactions.pubtruncate)\n- break;\n\nMaybe this is intentional, but it's not clear to me.\n\n\n", "msg_date": "Fri, 7 Jan 2022 12:46:35 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Jan 7, 2022 at 5:16 PM Peter Eisentraut\n<peter.eisentraut@enterprisedb.com> wrote:\n>\n> src/backend/commands/tablecmds.c:\n>\n> ATExecReplicaIdentity(): Regarding the question of how to handle\n> REPLICA_IDENTITY_NOTHING: I see two ways to do this. Right now, the\n> approach is that the user can set the replica identity freely, and we\n> decide later based on that what we can replicate (e.g., no updates).\n>\n\n+1. This is what we are trying to do with the row filter patch. 
It\nseems Hou-San has also mentioned the same on this thread [1].\n\n> For this patch, that would mean we don't restrict what columns can be\n> in the column list, but we check what actions we can replicate based\n> on the column list. The alternative is that we require the column\n> list to include the replica identity, as the patch is currently doing,\n> which would mean that REPLICA_IDENTITY_NOTHING can be allowed since\n> it's essentially a set of zero columns.\n>\n> I find the current behavior a bit weird on reflection. If a user\n> wants to replicate on some columns and only INSERTs, that should be\n> allowed regardless of what the replica identity columns are.\n>\n\nRight, I also raised the same point [2] related to INSERTs.\n\n[1] - https://www.postgresql.org/message-id/OS0PR01MB5716330FFE3803DF887D073C94789%40OS0PR01MB5716.jpnprd01.prod.outlook.com\n[2] - https://www.postgresql.org/message-id/CAA4eK1%2BFoJ-J7wUG5s8zCtY0iBuN9LcjQcYhV4BD17xhuHfoug%40mail.gmail.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 7 Jan 2022 18:38:58 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "In this version I have addressed these points, except the REPLICA\nIDENTITY NOTHING stuff.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"The eagle never lost so much time, as\nwhen he submitted to learn of the crow.\" (William Blake)", "msg_date": "Mon, 10 Jan 2022 18:38:40 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Jan-07, Peter Eisentraut wrote:\n\n> ATExecReplicaIdentity(): Regarding the question of how to handle\n> REPLICA_IDENTITY_NOTHING: I see two ways to do this. 
Right now, the\n> approach is that the user can set the replica identity freely, and we\n> decide later based on that what we can replicate (e.g., no updates).\n> For this patch, that would mean we don't restrict what columns can be\n> in the column list, but we check what actions we can replicate based\n> on the column list. The alternative is that we require the column\n> list to include the replica identity, as the patch is currently doing,\n> which would mean that REPLICA_IDENTITY_NOTHING can be allowed since\n> it's essentially a set of zero columns.\n> \n> I find the current behavior a bit weird on reflection. If a user\n> wants to replicate on some columns and only INSERTs, that should be\n> allowed regardless of what the replica identity columns are.\n\nHmm. So you're saying that we should only raise errors about the column\nlist if we are publishing UPDATE or DELETE, but otherwise let the\nreplica identity be anything. OK, I'll see if I can come up with a\nreasonable set of rules ...\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Before you were born your parents weren't as boring as they are now. They\ngot that way paying your bills, cleaning up your room and listening to you\ntell them how idealistic you are.\" -- Charles J. Sykes' advice to teenagers\n\n\n", "msg_date": "Mon, 10 Jan 2022 21:28:48 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Jan-10, Alvaro Herrera wrote:\n\n> Hmm. So you're saying that we should only raise errors about the column\n> list if we are publishing UPDATE or DELETE, but otherwise let the\n> replica identity be anything. OK, I'll see if I can come up with a\n> reasonable set of rules ...\n\nThis is an attempt to do it that way. 
Now you can add a table to a\npublication without regards for how column filter compares to the\nreplica identity, as long as the publication does not include updates\nand inserts.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"La fuerza no está en los medios físicos\nsino que reside en una voluntad indomable\" (Gandhi)", "msg_date": "Tue, 11 Jan 2022 15:52:22 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Is there any coordination between the \"column filter\" patch and the \"row\nfilter\" patch ? Are they both on track for PG15 ? Has anybody run them\ntogether ?\n\nWhichever patch is merged 2nd should include tests involving a subset of\ncolumns along with a WHERE clause.\n\nI have a suggestion: for the functions for which both patches are adding\nadditional argument types, define a filtering structure for both patches to\nuse. Similar to what we did for some utility statements in a3dc92600.\n\nI'm referring to:\nlogicalrep_write_update()\nlogicalrep_write_tuple()\n\nThat would avoid avoid some rebase conflicts on april 9, and avoid functions\nwith 7,8,9 arguments, and maybe simplify adding arguments in the future.\n\n-- \nJustin\n\n\n", "msg_date": "Tue, 11 Jan 2022 15:10:53 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Jan-11, Alvaro Herrera wrote:\n\n> On 2022-Jan-10, Alvaro Herrera wrote:\n> \n> > Hmm. So you're saying that we should only raise errors about the column\n> > list if we are publishing UPDATE or DELETE, but otherwise let the\n> > replica identity be anything. OK, I'll see if I can come up with a\n> > reasonable set of rules ...\n> \n> This is an attempt to do it that way. 
Now you can add a table to a\n> publication without regards for how column filter compares to the\n> replica identity, as long as the publication does not include updates\n> and inserts.\n\nI discovered a big hole in this, which is that ALTER PUBLICATION SET\n(publish='insert,update') can add UPDATE publishing to a publication\nthat was only publishing INSERTs. It's easy to implement a fix: in\nAlterPublicationOptions, scan the list of tables and raise an error if\nany of them has a column list that doesn't include all the columns in\nthe replica identity.\n\nHowever, that proposal has an ugly flaw: there is no index on\npg_publication_rel.prpubid, which means that the only way to find the\nrelations we need to inspect is to seqscan pg_publication_rel.\n\nAlso, psql's query for \\dRp+ uses a seqscan in pg_publication_rel.\n\nTherefore, I propose to add an index on pg_publication_rel.prpubid.\n\n-- \nÁlvaro Herrera Valdivia, Chile — https://www.EnterpriseDB.com/\n\"¿Qué importan los años? 
Lo que realmente importa es comprobar que\na fin de cuentas la mejor edad de la vida es estar vivo\" (Mafalda)\n\n\n", "msg_date": "Tue, 11 Jan 2022 21:41:28 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 12.01.22 01:41, Alvaro Herrera wrote:\n> Therefore, I propose to add an index on pg_publication_rel.prpubid.\n\nThat seems very reasonable.\n\n\n", "msg_date": "Wed, 12 Jan 2022 12:30:13 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Jan-11, Justin Pryzby wrote:\n\n> Is there any coordination between the \"column filter\" patch and the \"row\n> filter\" patch ?\n\nNot beyond the grammar, which I tested.\n\n> Are they both on track for PG15 ?\n\nI think they're both on track, yes.\n\n> Has anybody run them together ?\n\nNot me.\n\n> I have a suggestion: for the functions for which both patches are adding\n> additional argument types, define a filtering structure for both patches to\n> use. Similar to what we did for some utility statements in a3dc92600.\n> \n> I'm referring to:\n> logicalrep_write_update()\n> logicalrep_write_tuple()\n\nFixed: the row filter patch no longer adds extra arguments to those\nfunctions.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"Tiene valor aquel que admite que es un cobarde\" (Fernandel)\n\n\n", "msg_date": "Wed, 12 Jan 2022 15:57:12 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Jan 12, 2022 at 2:40 AM Justin Pryzby <pryzby@telsasoft.com> wrote:\n>\n> Is there any coordination between the \"column filter\" patch and the \"row\n> filter\" patch ? Are they both on track for PG15 ? 
Has anybody run them\n> together ?\n>\n\nThe few things where I think we might need to define some common\nbehavior are as follows:\n\n1. Replica Identity handling: Currently the column filter patch gives\nan error during create/alter subscription if the specified column list\nis invalid (Replica Identity columns are missing). It also gives an\nerror if the user tries to change the replica identity. However, it\ndoesn't deal with cases where the user drops and adds a different\nprimary key that has a different set of columns which can lead to\nfailure during apply on the subscriber.\n\nI think another issue w.r.t column filter patch is that even while\ncreating publication (even for 'insert' publications) it should check\nthat all primary key columns must be part of published columns,\notherwise, it can fail while applying on subscriber as it will try to\ninsert NULL for the primary key column.\n\n2. Handling of partitioned tables vs. Replica Identity (RI): When\nadding a partitioned table with a column list to the publication (with\npublish_via_partition_root = false), we should check the Replica\nIdentity of all its leaf partition as the RI on the partition is the\none actually takes effect when publishing DML changes. We need to\ncheck RI while attaching the partition as well, as the newly added\npartitions will automatically become part of publication if the\npartitioned table is part of the publication. If we don't do this the\nlater deletes/updates can fail.\n\nAll these cases are dealt with in row filter patch because of the\non-the-fly check which means we check the validation of columns in row\nfilters while actual operation update/delete via\nCheckCmdReplicaIdentity and cache the result of same for future use.\nThis is inline with existing checks of RI vs. operations on tables.\nThe primary reason for this was we didn't want to handle validation of\nrow filters at so many places.\n\n3. 
Tablesync.c handling: Ideally, it would be good if we have a single\nquery to fetch both row filters and column filters but even if that is\nnot possible in the first version, the behavior should be same for\nboth queries w.r.t partitioned tables, For ALL Tables and For All\nTables In Schema cases.\n\nCurrently, the column filter patch doesn't seem to respect For ALL\nTables and For All Tables In Schema cases, basically, it just copies\nthe columns it finds through some of the publications even if one of\nthe publications is defined as For All Tables. The row filter patch\nignores the row filters if one of the publications is defined as For\nALL Tables and For All Tables In Schema.\n\nFor row filter patch, if the publication contains a partitioned table,\nthe publication parameter publish_via_partition_root determines if it\nuses the partition row filter (if the parameter is false, the default)\nor the root partitioned table row filter and this is taken care of\neven during the initial tablesync.\n\nFor column filter patch, if the publication contains a partitioned\ntable, it seems that it finds all columns that the tables in its\npartition tree specified in the publications, whether\npublish_via_partition_root is true or false.\n\nWe have done some testing w.r.t above cases with both patches and my\ncolleague will share the results.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 14 Jan 2022 17:21:41 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Friday, January 14, 2022 7:52 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\r\n> \r\n> On Wed, Jan 12, 2022 at 2:40 AM Justin Pryzby <pryzby@telsasoft.com> wrote:\r\n> >\r\n> > Is there any coordination between the \"column filter\" patch and the \"row\r\n> > filter\" patch ? Are they both on track for PG15 ? 
Has anybody run them\r\n> > together ?\r\n> >\r\n> \r\n> The few things where I think we might need to define some common\r\n> behavior are as follows:\r\n> \r\n\r\nI tried some cases about the points you mentions, which can be taken as\r\nreference.\r\n\r\n> 1. Replica Identity handling: Currently the column filter patch gives\r\n> an error during create/alter subscription if the specified column list\r\n> is invalid (Replica Identity columns are missing). It also gives an\r\n> error if the user tries to change the replica identity. However, it\r\n> doesn't deal with cases where the user drops and adds a different\r\n> primary key that has a different set of columns which can lead to\r\n> failure during apply on the subscriber.\r\n> \r\n\r\nAn example for this scenario:\r\n-- publisher --\r\ncreate table tbl(a int primary key, b int);\r\ncreate publication pub for table tbl(a);\r\nalter table tbl drop CONSTRAINT tbl_pkey;\r\nalter table tbl add primary key (b);\r\n\r\n-- subscriber --\r\ncreate table tbl(a int, b int);\r\ncreate subscription sub connection 'port=5432 dbname=postgres' publication pub;\r\n\r\n-- publisher --\r\ninsert into tbl values (1,1);\r\n\r\n-- subscriber --\r\npostgres=# select * from tbl;\r\n a | b\r\n---+---\r\n 1 |\r\n(1 row)\r\n\r\nupdate tbl set b=1 where a=1;\r\nalter table tbl add primary key (b);\r\n\r\n-- publisher --\r\ndelete from tbl;\r\n\r\n\r\nThe subscriber reported the following error message and DELETE failed in subscriber.\r\nERROR: publisher did not send replica identity column expected by the logical replication target relation \"public.tbl\"\r\nCONTEXT: processing remote data during \"DELETE\" for replication target relation \"public.tbl\" in transaction 723 at 2022-01-14 13:11:51.514261+08\r\n\r\n-- subscriber\r\npostgres=# select * from tbl;\r\n a | b\r\n---+---\r\n 1 | 1\r\n(1 row)\r\n\r\n> I think another issue w.r.t column filter patch is that even while\r\n> creating publication (even for 'insert' publications) it 
should check\r\n> that all primary key columns must be part of published columns,\r\n> otherwise, it can fail while applying on subscriber as it will try to\r\n> insert NULL for the primary key column.\r\n> \r\n\r\nFor example:\r\n-- publisher --\r\ncreate table tbl(a int primary key, b int);\r\ncreate publication pub for table tbl(a);\r\nalter table tbl drop CONSTRAINT tbl_pkey;\r\nalter table tbl add primary key (b);\r\n\r\n-- subscriber --\r\ncreate table tbl(a int, b int primary key);\r\ncreate subscription sub connection 'port=5432 dbname=postgres' publication pub;\r\n\r\n-- publisher --\r\ninsert into tbl values (1,1);\r\n\r\nThe subscriber reported the following error message and INSERT failed in subscriber.\r\nERROR: null value in column \"b\" of relation \"tbl\" violates not-null constraint\r\nDETAIL: Failing row contains (1, null).\r\n\r\n-- subscriber --\r\npostgres=# select * from tbl;\r\n a | b\r\n---+---\r\n(0 rows)\r\n\r\n> 2. Handling of partitioned tables vs. Replica Identity (RI): When\r\n> adding a partitioned table with a column list to the publication (with\r\n> publish_via_partition_root = false), we should check the Replica\r\n> Identity of all its leaf partition as the RI on the partition is the\r\n> one actually takes effect when publishing DML changes. We need to\r\n> check RI while attaching the partition as well, as the newly added\r\n> partitions will automatically become part of publication if the\r\n> partitioned table is part of the publication. 
If we don't do this the\r\n> later deletes/updates can fail.\r\n> \r\n\r\nPlease see the following 3 cases about partition.\r\n\r\nCase1 (publish a parent table which has a partition table):\r\n----------------------------\r\n-- publisher --\r\ncreate table parent (a int, b int) partition by range (a);\r\ncreate table child partition of parent default;\r\ncreate unique INDEX ON child (a,b);\r\nalter table child alter a set not null;\r\nalter table child alter b set not null;\r\nalter table child replica identity using INDEX child_a_b_idx;\r\ncreate publication pub for table parent(a) with(publish_via_partition_root=false);\r\n\r\n-- subscriber --\r\ncreate table parent (a int, b int) partition by range (a);\r\ncreate table child partition of parent default;\r\ncreate subscription sub connection 'port=5432 dbname=postgres' publication pub;\r\n\r\n-- publisher --\r\ninsert into parent values (1,1);\r\n\r\n-- subscriber --\r\npostgres=# select * from parent;\r\n a | b\r\n---+---\r\n 1 |\r\n(1 row)\r\n\r\n-- add RI in subscriber to avoid other errors\r\nupdate child set b=1 where a=1;\r\ncreate unique INDEX ON child (a,b);\r\nalter table child alter a set not null;\r\nalter table child alter b set not null;\r\nalter table child replica identity using INDEX child_a_b_idx;\r\n\r\n-- publisher --\r\ndelete from parent;\r\n\r\nThe subscriber reported the following error message and DELETE failed in subscriber.\r\nERROR: publisher did not send replica identity column expected by the logical replication target relation \"public.child\"\r\nCONTEXT: processing remote data during \"DELETE\" for replication target relation \"public.child\" in transaction 727 at 2022-01-14 20:29:46.50784+08\r\n\r\n-- subscriber --\r\npostgres=# select * from parent;\r\n a | b\r\n---+---\r\n 1 | 1\r\n(1 row)\r\n\r\n\r\nCase2 (create publication for parent table, then alter table to attach partition):\r\n----------------------------\r\n-- publisher --\r\ncreate table parent (a int, b int) partition 
by range (a);\r\ncreate table child (a int, b int);\r\ncreate unique INDEX ON child (a,b);\r\nalter table child alter a set not null;\r\nalter table child alter b set not null;\r\nalter table child replica identity using INDEX child_a_b_idx;\r\ncreate publication pub for table parent(a) with(publish_via_partition_root=false);\r\nalter table parent attach partition child default;\r\ninsert into parent values (1,1);\r\n\r\n-- subscriber --\r\ncreate table parent (a int, b int) partition by range (a);\r\ncreate table child partition of parent default;\r\ncreate subscription sub connection 'port=5432 dbname=postgres' publication pub;\r\n\r\npostgres=# select * from parent;\r\n a | b\r\n---+---\r\n 1 |\r\n(1 row)\r\n\r\n-- add RI in subscriber to avoid other errors\r\nupdate child set b=1 where a=1;\r\ncreate unique INDEX ON child (a,b);\r\nalter table child alter a set not null;\r\nalter table child alter b set not null;\r\nalter table child replica identity using INDEX child_a_b_idx;\r\n\r\n-- publisher --\r\ndelete from parent;\r\n\r\nThe subscriber reported the following error message and DELETE failed in subscriber.\r\nERROR: publisher did not send replica identity column expected by the logical replication target relation \"public.child\"\r\nCONTEXT: processing remote data during \"DELETE\" for replication target relation \"public.child\" in transaction 728 at 2022-01-14 20:42:16.483878+08\r\n\r\n-- subscriber --\r\npostgres=# select * from parent;\r\n a | b\r\n---+---\r\n 1 | 1\r\n(1 row)\r\n\r\n\r\nCase3 (create publication for parent table, then using \"create table partition\r\nof\", and specify primary key when creating partition table):\r\n----------------------------\r\n-- publisher --\r\ncreate table parent (a int, b int) partition by range (a);\r\ncreate publication pub for table parent(a) with(publish_via_partition_root=false);\r\ncreate table child partition of parent (primary key (a,b)) default;\r\n\r\n-- subscriber --\r\ncreate table parent (a int, b 
int) partition by range (a);\r\ncreate table child partition of parent default;\r\ncreate subscription sub connection 'port=5432 dbname=postgres' publication pub;\r\n\r\n-- publisher --\r\ninsert into parent values (1,1);\r\n\r\n-- subscriber --\r\npostgres=# select * from parent;\r\n a | b\r\n---+---\r\n 1 |\r\n(1 row)\r\n\r\n-- add PK in subscriber to avoid other errors\r\nupdate child set b=1 where a=1;\r\nalter table child add primary key (a,b);\r\n\r\n-- publisher --\r\ndelete from parent;\r\n\r\nThe subscriber reported the following error message and DELETE failed in subscriber.\r\nERROR: publisher did not send replica identity column expected by the logical replication target relation \"public.child\"\r\nCONTEXT: processing remote data during \"DELETE\" for replication target relation \"public.child\" in transaction 723 at 2022-01-14 20:45:33.622168+08\r\n\r\n-- subscriber --\r\npostgres=# select * from parent;\r\n a | b\r\n---+---\r\n 1 | 1\r\n(1 row)\r\n\r\n> 3. Tablesync.c handling: Ideally, it would be good if we have a single\r\n> query to fetch both row filters and column filters but even if that is\r\n> not possible in the first version, the behavior should be same for\r\n> both queries w.r.t partitioned tables, For ALL Tables and For All\r\n> Tables In Schema cases.\r\n> \r\n> Currently, the column filter patch doesn't seem to respect For ALL\r\n> Tables and For All Tables In Schema cases, basically, it just copies\r\n> the columns it finds through some of the publications even if one of\r\n> the publications is defined as For All Tables. 
The row filter patch\r\n> ignores the row filters if one of the publications is defined as For\r\n> ALL Tables and For All Tables In Schema.\r\n> \r\n\r\nA case for the publications is defined as For ALL Tables and For All Tables In\r\nSchema:\r\n-- publisher --\r\ncreate schema s1;\r\ncreate table s1.t1 (a int, b int);\r\ncreate publication p1 for table s1.t1 (a);\r\ncreate publication p2 for all tables;\r\ninsert into s1.t1 values (1,1);\r\n\r\n-- subscriber --\r\ncreate schema s1;\r\ncreate table s1.t1 (a int, b int);\r\ncreate subscription sub connection 'port=5432 dbname=postgres' publication p1, p2;\r\npostgres=# select * from s1.t1;\r\n a | b\r\n---+---\r\n 1 |\r\n(1 row)\r\n\r\n(I got the same result when p2 is specified as \"FOR ALL TABLES IN SCHEMA s1\")\r\n\r\n> For row filter patch, if the publication contains a partitioned table,\r\n> the publication parameter publish_via_partition_root determines if it\r\n> uses the partition row filter (if the parameter is false, the default)\r\n> or the root partitioned table row filter and this is taken care of\r\n> even during the initial tablesync.\r\n> \r\n> For column filter patch, if the publication contains a partitioned\r\n> table, it seems that it finds all columns that the tables in its\r\n> partition tree specified in the publications, whether\r\n> publish_via_partition_root is true or false.\r\n> \r\n\r\nPlease see the following cases.\r\n\r\nColumn filter\r\n----------------------------------------\r\n-- publisher --\r\ncreate table parent (a int, b int) partition by range (a);\r\ncreate table child partition of parent default;\r\ncreate publication p1 for table parent (a) with(publish_via_partition_root=false);\r\ncreate publication p2 for table parent (a) with(publish_via_partition_root=true);\r\ninsert into parent values (1,1);\r\n\r\n-- subscriber --\r\ncreate table parent (a int, b int) partition by range (a);\r\ncreate table child partition of parent default;\r\ncreate subscription sub connection 
'port=5432 dbname=postgres' publication p1;\r\npostgres=# select * from parent; -- column filter works when publish_via_partition_root=false\r\n a | b\r\n---+---\r\n 1 |\r\n(1 row)\r\n\r\ndrop subscription sub;\r\ndelete from parent;\r\ncreate subscription sub connection 'port=5432 dbname=postgres' publication p2;\r\npostgres=# select * from parent; -- column filter also works when publish_via_partition_root=true\r\n a | b\r\n---+---\r\n 1 |\r\n(1 row)\r\n\r\n\r\nRow filter\r\n----------------------------------------\r\n-- publisher --\r\ncreate table parent (a int, b int) partition by range (a);\r\ncreate table child partition of parent default;\r\ncreate publication p1 for table parent where (a>10) with(publish_via_partition_root=false);\r\ncreate publication p2 for table parent where (a>10) with(publish_via_partition_root=true);\r\ninsert into parent values (1,1);\r\ninsert into parent values (11,11);\r\n\r\n-- subscriber\r\ncreate table parent (a int, b int) partition by range (a);\r\ncreate table child partition of parent default;\r\ncreate subscription sub connection 'port=5432 dbname=postgres' publication p1;\r\npostgres=# select * from parent; -- row filter doesn't work when publish_via_partition_root=false\r\n a | b\r\n----+----\r\n 1 | 1\r\n 11 | 11\r\n(2 rows)\r\n\r\ndrop subscription sub;\r\ndelete from parent;\r\ncreate subscription sub connection 'port=5432 dbname=postgres' publication p2;\r\npostgres=# select * from parent; -- row filter works when publish_via_partition_root=true\r\n a | b\r\n----+----\r\n 11 | 11\r\n(1 row)\r\n\r\nRegards,\r\nTang\r\n", "msg_date": "Fri, 14 Jan 2022 13:18:49 +0000", "msg_from": "\"tanghy.fnst@fujitsu.com\" <tanghy.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Jan-14, Amit Kapila wrote:\n\n> 1. 
Replica Identity handling: Currently the column filter patch gives\n> an error during create/alter subscription if the specified column list\n> is invalid (Replica Identity columns are missing). It also gives an\n> error if the user tries to change the replica identity. However, it\n> doesn't deal with cases where the user drops and adds a different\n> primary key that has a different set of columns which can lead to\n> failure during apply on the subscriber.\n\nHmm, yeah, I suppose we should check that the primary key is compatible\nwith the column list in all publications. (I wonder what happens in the\ninterim, that is, what happens to tuples modified after the initial PK\nis dropped and before the new PK is installed. Are these considered to\nhave \"replica identiy nothing\"?)\n\n> I think another issue w.r.t column filter patch is that even while\n> creating publication (even for 'insert' publications) it should check\n> that all primary key columns must be part of published columns,\n> otherwise, it can fail while applying on subscriber as it will try to\n> insert NULL for the primary key column.\n\nI'm not so sure about the primary key aspects, actually; keep in mind\nthat the replica can have a different table definition, and it might\nhave even a completely different primary key. I think this part is up\nto the user to set up correctly; we have enough with just trying to make\nthe replica identity correct.\n\n> 2. Handling of partitioned tables vs. Replica Identity (RI): When\n> adding a partitioned table with a column list to the publication (with\n> publish_via_partition_root = false), we should check the Replica\n> Identity of all its leaf partition as the RI on the partition is the\n> one actually takes effect when publishing DML changes. We need to\n> check RI while attaching the partition as well, as the newly added\n> partitions will automatically become part of publication if the\n> partitioned table is part of the publication. 
If we don't do this the\n> later deletes/updates can fail.\n\nHmm, yeah.\n\n> 3. Tablesync.c handling: Ideally, it would be good if we have a single\n> query to fetch both row filters and column filters but even if that is\n> not possible in the first version, the behavior should be same for\n> both queries w.r.t partitioned tables, For ALL Tables and For All\n> Tables In Schema cases.\n> \n> Currently, the column filter patch doesn't seem to respect For ALL\n> Tables and For All Tables In Schema cases, basically, it just copies\n> the columns it finds through some of the publications even if one of\n> the publications is defined as For All Tables. The row filter patch\n> ignores the row filters if one of the publications is defined as For\n> ALL Tables and For All Tables In Schema.\n\nOh, yeah, if a table appears in two publications and one of them is ALL\nTABLES [IN SCHEMA], then we don't consider it as an all-columns\npublication. You're right, that should be corrected.\n\n> We have done some testing w.r.t above cases with both patches and my\n> colleague will share the results.\n\nGreat, thanks.\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Fri, 14 Jan 2022 10:38:00 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Jan 14, 2022 at 7:08 PM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2022-Jan-14, Amit Kapila wrote:\n>\n> > 1. Replica Identity handling: Currently the column filter patch gives\n> > an error during create/alter subscription if the specified column list\n> > is invalid (Replica Identity columns are missing). It also gives an\n> > error if the user tries to change the replica identity. 
However, it\n> > doesn't deal with cases where the user drops and adds a different\n> > primary key that has a different set of columns which can lead to\n> > failure during apply on the subscriber.\n>\n> Hmm, yeah, I suppose we should check that the primary key is compatible\n> with the column list in all publications. (I wonder what happens in the\n> interim, that is, what happens to tuples modified after the initial PK\n> is dropped and before the new PK is installed. Are these considered to\n> have \"replica identiy nothing\"?)\n>\n\nI think so.\n\n> > I think another issue w.r.t column filter patch is that even while\n> > creating publication (even for 'insert' publications) it should check\n> > that all primary key columns must be part of published columns,\n> > otherwise, it can fail while applying on subscriber as it will try to\n> > insert NULL for the primary key column.\n>\n> I'm not so sure about the primary key aspects, actually; keep in mind\n> that the replica can have a different table definition, and it might\n> have even a completely different primary key. I think this part is up\n> to the user to set up correctly; we have enough with just trying to make\n> the replica identity correct.\n>\n\nBut OTOH, the primary key is also considered default replica identity,\nso I think users will expect it to work. 
You are right this problem\ncan also happen if the user defined a different primary key on a\nreplica but that is even a problem in HEAD (simple inserts will fail)\nbut I am worried about the case where both the publisher and\nsubscriber have the same primary key as that works in HEAD.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Sat, 15 Jan 2022 09:15:07 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 15.01.22 04:45, Amit Kapila wrote:\n>>> I think another issue w.r.t column filter patch is that even while\n>>> creating publication (even for 'insert' publications) it should check\n>>> that all primary key columns must be part of published columns,\n>>> otherwise, it can fail while applying on subscriber as it will try to\n>>> insert NULL for the primary key column.\n>>\n>> I'm not so sure about the primary key aspects, actually; keep in mind\n>> that the replica can have a different table definition, and it might\n>> have even a completely different primary key. I think this part is up\n>> to the user to set up correctly; we have enough with just trying to make\n>> the replica identity correct.\n> \n> But OTOH, the primary key is also considered default replica identity,\n> so I think users will expect it to work. You are right this problem\n> can also happen if the user defined a different primary key on a\n> replica but that is even a problem in HEAD (simple inserts will fail)\n> but I am worried about the case where both the publisher and\n> subscriber have the same primary key as that works in HEAD.\n\nThis would seem to be a departure from the current design of logical \nreplication. It's up to the user to arrange things so that data can be \napplied in general. 
Otherwise, if the default assumption is that the \nschema is the same on both sides, then column filtering shouldn't exist \nat all, since that will necessarily break that assumption.\n\nMaybe there could be a strict mode or something that has more checks, \nbut that would be a separate feature. The existing behavior is that you \ncan publish anything you want and it's up to you to make sure the \nreceiving side can store it.\n\n\n", "msg_date": "Tue, 18 Jan 2022 11:33:19 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Here are some review comments for the v17-0001 patch.\n\n~~~\n\n1. Commit message\n\nIf no column list is specified, all the columns are replicated, as\npreviously\n\nMissing period (.) at the end of that sentence.\n\n~~~\n\n2. doc/src/sgml/catalogs.sgml\n\n+ <para>\n+ This is an array of values that indicates which table columns are\n+ part of the publication. For example a value of <literal>1 3</literal>\n+ would mean that the first and the third table columns are published.\n+ A null value indicates that all attributes are published.\n+ </para></entry>\n\nMissing comma:\n\"For example\" --> \"For example,\"\n\nTerms:\nThe text seems to jump between \"columns\" and \"attributes\". Perhaps,\nfor consistency, that last sentence should say: \"A null value\nindicates that all columns are published.\"\n\n~~~\n\n3. 
doc/src/sgml/protocol.sgml\n\n </variablelist>\n- Next, the following message part appears for each column\n(except generated columns):\n+ Next, the following message part appears for each column (except\n+ generated columns and other columns that don't appear in the column\n+ filter list, for tables that have one):\n <variablelist>\n\nPerhaps that can be expressed more simply, like:\n\nNext, the following message part appears for each column (except\ngenerated columns and other columns not present in the optional column\nfilter list):\n\n~~~\n\n4. doc/src/sgml/ref/alter_publication.sgml\n\n+ALTER PUBLICATION <replaceable class=\"parameter\">name</replaceable>\nALTER TABLE <replaceable\nclass=\"parameter\">publication_object</replaceable> SET COLUMNS { (\n<replaceable class=\"parameter\">name</replaceable> [, ...] ) | ALL }\n\nThe syntax chart looks strange because there is already a \"TABLE\" and\na column_name list within the \"publication_object\" definition, so do\nALTER TABLE and publication_object co-exist?\nAccording to the current documentation it suggests nonsense like below is valid:\nALTER PUBLICATION mypublication ALTER TABLE TABLE t1 (a,b,c) SET\nCOLUMNS (a,b,c);\n\n--\n\nBut more fundamentally, I don't see why any new syntax is even needed at all.\n\nInstead of:\nALTER PUBLICATION mypublication ALTER TABLE users SET COLUMNS\n(user_id, firstname, lastname);\nWhy not just:\nALTER PUBLICATION mypublication ALTER TABLE users (user_id, firstname,\nlastname);\n\nThen, if the altered table defines a *different* column list then it\nwould be functionally equivalent to whatever your SET COLUMNS is doing\nnow. AFAIK this is how the Row-Filter [1] works, so that altering an\nexisting table to have a different Row-Filter just overwrites that\ntable's filter. IMO the Col-Filter behaviour should work the same as\nthat - \"SET COLUMNS\" is redundant.\n\n~~~\n\n5. 
doc/src/sgml/ref/alter_publication.sgml\n\n- TABLE [ ONLY ] <replaceable\nclass=\"parameter\">table_name</replaceable> [ * ] [, ... ]\n+ TABLE [ ONLY ] <replaceable\nclass=\"parameter\">table_name</replaceable> [ * ] [ ( <replaceable\nclass=\"parameter\">column_name</replaceable>, [, ... ] ) ] [, ... ]\n\nThat extra comma after the \"column_name\" seems wrong because there is\none already in \"[, ... ]\".\n\n~~~\n\n6. doc/src/sgml/ref/create_publication.sgml\n\n- TABLE [ ONLY ] <replaceable\nclass=\"parameter\">table_name</replaceable> [ * ] [, ... ]\n+ TABLE [ ONLY ] <replaceable\nclass=\"parameter\">table_name</replaceable> [ * ] [ ( <replaceable\nclass=\"parameter\">column_name</replaceable>, [, ... ] ) ] [, ... ]\n\n(Same as comment #5).\nThat extra comma after the \"column_name\" seems wrong because there is\none already in \"[, ... ]\".\n\n~~~\n\n7. doc/src/sgml/ref/create_publication.sgml\n\n+ <para>\n+ When a column list is specified, only the listed columns are replicated;\n+ any other columns are ignored for the purpose of replication through\n+ this publication. If no column list is specified, all columns of the\n+ table are replicated through this publication, including any columns\n+ added later. If a column list is specified, it must include the replica\n+ identity columns.\n+ </para>\n\nSuggest to re-word this a bit simpler:\n\ne.g.\n- \"listed columns\" --> \"named columns\"\n- I don't think it is necessary to say the unlisted columns are ignored.\n- I didn't think it is necessary to say \"though this publication\"\n\nAFTER\nWhen a column list is specified, only the named columns are replicated.\nIf no column list is specified, all columns of the table are replicated,\nincluding any columns added later. If a column list is specified, it must\ninclude the replica identity columns.\n\n~~~\n\n8. doc/src/sgml/ref/create_publication.sgml\n\nConsider adding another example showing a CREATE PUBLICATION which has\na column list.\n\n~~~\n\n9. 
src/backend/catalog/pg_publication.c - check_publication_add_relation\n\n /*\n- * Check if relation can be in given publication and throws appropriate\n- * error if not.\n+ * Check if relation can be in given publication and that the column\n+ * filter is sensible, and throws appropriate error if not.\n+ *\n+ * targetcols is the bitmapset of attribute numbers given in the column list,\n+ * or NULL if it was not specified.\n */\n\nTypo: \"targetcols\" --> \"columns\" ??\n\n~~~\n\n10. src/backend/catalog/pg_publication.c - check_publication_add_relation\n\n+\n+ /* Make sure the column list checks out */\n+ if (columns != NULL)\n+ {\n\nPerhaps \"checks out\" could be worded better.\n\n~~~\n\n11. src/backend/catalog/pg_publication.c - check_publication_add_relation\n\n+ /* Make sure the column list checks out */\n+ if (columns != NULL)\n+ {\n+ /*\n+ * Even if the user listed all columns in the column list, we cannot\n+ * allow a column list to be specified when REPLICA IDENTITY is FULL;\n+ * that would cause problems if a new column is added later, because\n+ * the new column would have to be included (because of being part of\n+ * the replica identity) but it's technically not allowed (because of\n+ * not being in the publication's column list yet). So reject this\n+ * case altogether.\n+ */\n+ if (replidentfull)\n+ ereport(ERROR,\n+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),\n+ errmsg(\"invalid column list for publishing relation \\\"%s\\\"\",\n+ RelationGetRelationName(targetrel)),\n+ errdetail(\"Cannot specify a column list on relations with REPLICA\nIDENTITY FULL.\"));\n+\n+ check_publication_columns(pub, targetrel, columns);\n+ }\n\nIIUC almost all of the above comment and code is redundant because by\ncalling the check_publication_columns function it will do exactly the\nsame check...\n\nSo, that entire slab might be replaced by 2 lines:\n\nif (columns != NULL)\ncheck_publication_columns(pub, targetrel, columns);\n\n~~~\n\n12. 
src/backend/catalog/pg_publication.c - publication_set_table_columns\n\n+publication_set_table_columns(Relation pubrel, HeapTuple pubreltup,\n+ Relation targetrel, List *columns)\n+{\n+ Bitmapset *attset;\n+ AttrNumber *attarray;\n+ HeapTuple copytup;\n+ int natts;\n+ bool nulls[Natts_pg_publication_rel];\n+ bool replaces[Natts_pg_publication_rel];\n+ Datum values[Natts_pg_publication_rel];\n+\n+ memset(values, 0, sizeof(values));\n+ memset(nulls, 0, sizeof(nulls));\n+ memset(replaces, false, sizeof(replaces));\n\nIt seemed curious to use memset false for \"replaces\" but memset 0 for\n\"nulls\", since they are both bool arrays (??)\n\n~~~\n\n13. src/backend/catalog/pg_publication.c - compare_int16\n\n+/* qsort comparator for attnums */\n+static int\n+compare_int16(const void *a, const void *b)\n+{\n+ int av = *(const int16 *) a;\n+ int bv = *(const int16 *) b;\n+\n+ /* this can't overflow if int is wider than int16 */\n+ return (av - bv);\n+}\n\nThis comparator seems common with another one already in the PG\nsource. Perhaps it would be better for generic comparators (like this\none) to be in some common code instead of scattered cut/paste copies\nof the same thing.\n\n~~~\n\n14. src/backend/commands/publicationcmds.c - AlterPublicationTables\n\n+ else if (stmt->action == AP_SetColumns)\n+ {\n+ Assert(schemaidlist == NIL);\n+ Assert(list_length(tables) == 1);\n+\n+ PublicationSetColumns(stmt, pubform,\n+ linitial_node(PublicationTable, tables));\n+ }\n\n(Same as my earlier review comment #4)\n\nSuggest to call this PublicationSetColumns based on some smarter\ndetection logic of a changed column list. Please refer to the\nRow-Filter patch [1] for this same function.\n\n~~~\n\n15. src/backend/commands/publicationcmds.c - AlterPublicationTables\n\n+ /* This is not needed to delete a table */\n+ pubrel->columns = NIL;\n\nPerhaps a more explanatory comment would be better there?\n\n~~~\n\n16. 
src/backend/commands/tablecmds.c - relation_mark_replica_identity\n\n@@ -15841,6 +15871,7 @@ relation_mark_replica_identity(Relation rel,\nchar ri_type, Oid indexOid,\n CatalogTupleUpdate(pg_index, &pg_index_tuple->t_self, pg_index_tuple);\n InvokeObjectPostAlterHookArg(IndexRelationId, thisIndexOid, 0,\n InvalidOid, is_internal);\n+\n /*\n * Invalidate the relcache for the table, so that after we commit\n * all sessions will refresh the table's replica identity index\n\nSpurious whitespace change seemed unrelated to the Col-Filter patch.\n\n~~~\n\n17. src/backend/parser/gram.y\n\n *\n+ * ALTER PUBLICATION name SET COLUMNS table_name (column[, ...])\n+ * ALTER PUBLICATION name SET COLUMNS table_name ALL\n+ *\n\n(Same as my earlier review comment #4)\n\nIMO there was no need for the new syntax of SET COLUMNS.\n\n~~~\n\n18. src/backend/replication/logical/proto.c - logicalrep_write_attrs\n\n- /* send number of live attributes */\n- for (i = 0; i < desc->natts; i++)\n- {\n- if (TupleDescAttr(desc, i)->attisdropped || TupleDescAttr(desc,\ni)->attgenerated)\n- continue;\n- nliveatts++;\n- }\n- pq_sendint16(out, nliveatts);\n-\n /* fetch bitmap of REPLICATION IDENTITY attributes */\n replidentfull = (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL);\n if (!replidentfull)\n idattrs = RelationGetIdentityKeyBitmap(rel);\n\n+ /* send number of live attributes */\n+ for (i = 0; i < desc->natts; i++)\n+ {\n+ Form_pg_attribute att = TupleDescAttr(desc, i);\n+\n+ if (att->attisdropped || att->attgenerated)\n+ continue;\n+ if (columns != NULL && !bms_is_member(att->attnum, columns))\n+ continue;\n+ nliveatts++;\n+ }\n+ pq_sendint16(out, nliveatts);\n+\n\nThis change seemed to have the effect of moving that 4 lines of\n\"replidentfull\" code from below the loop to above the loop. But moving\nthat code seems unrelated to the Col-Filter patch. (??).\n\n~~~\n\n19. 
src/backend/replication/logical/tablesync.c - fetch_remote_table_info\n\n@@ -793,12 +877,12 @@ fetch_remote_table_info(char *nspname, char *relname,\n\n ExecClearTuple(slot);\n }\n+\n ExecDropSingleTupleTableSlot(slot);\n-\n- lrel->natts = natt;\n-\n walrcv_clear_result(res);\n pfree(cmd.data);\n+\n+ lrel->natts = natt;\n }\n\nThe shuffling of those few lines seems unrelated to any requirement of\nthe Col-Filter patch (??)\n\n~~~\n\n20. src/backend/replication/logical/tablesync.c - copy_table\n\n+ for (int i = 0; i < lrel.natts; i++)\n+ {\n+ appendStringInfoString(&cmd, quote_identifier(lrel.attnames[i]));\n+ if (i < lrel.natts - 1)\n+ appendStringInfoString(&cmd, \", \");\n+ }\n\nPerhaps that could be expressed more simply if the other way around like:\n\nfor (int i = 0; i < lrel.natts; i++)\n{\nif (i)\nappendStringInfoString(&cmd, \", \");\nappendStringInfoString(&cmd, quote_identifier(lrel.attnames[i]));\n}\n\n~~~\n\n21. src/backend/replication/pgoutput/pgoutput.c\n\n+\n+ /*\n+ * Set of columns included in the publication, or NULL if all columns are\n+ * included implicitly. Note that the attnums in this list are not\n+ * shifted by FirstLowInvalidHeapAttributeNumber.\n+ */\n+ Bitmapset *columns;\n\nTypo: \"in this list\" --> \"in this set\" (??)\n\n~~~\n\n22. src/backend/replication/pgoutput/pgoutput.c - get_rel_sync_entry\n\n * Don't publish changes for partitioned tables, because\n- * publishing those of its partitions suffices, unless partition\n- * changes won't be published due to pubviaroot being set.\n+ * publishing those of its partitions suffices. (However, ignore\n+ * this if partition changes are not to published due to\n+ * pubviaroot being set.)\n */\n\nThis change seems unrelated to the Col-Filter patch, so perhaps it\nshould not be here at all.\n\nAlso, typo: \"are not to published\"\n\n~~~\n\n23. 
src/backend/replication/pgoutput/pgoutput.c - get_rel_sync_entry\n\n+ /*\n+ * Obtain columns published by this publication, and add them\n+ * to the list for this rel. Note that if at least one\n+ * publication has a empty column list, that means to publish\n+ * everything; so if we saw a publication that includes all\n+ * columns, skip this.\n+ */\n\nTypo: \"a empty\" --> \"an empty\"\n\n~~~\n\n24. src/backend/replication/pgoutput/pgoutput.c - get_rel_sync_entry\n\n+ if (isnull)\n+ {\n+ /*\n+ * If we see a publication with no columns, reset the\n+ * list and ignore further ones.\n+ */\n\nPerhaps that comment is meant to say \"with no column filter\" instead\nof \"with no columns\"?\n\n~~~\n\n25. src/backend/replication/pgoutput/pgoutput.c - get_rel_sync_entry\n\n+ if (isnull)\n+ {\n...\n+ }\n+ else if (!isnull)\n+ {\n...\n+ }\n\nIs the \"if (!isnull)\" in the else just to be really REALLY sure it is not null?\n\n~~~\n\n26. src/bin/pg_dump/pg_dump.c - getPublicationTables\n\n+ pubrinfo[i].pubrattrs = attribs->data;\n+ }\n+ else\n+ pubrinfo[j].pubrattrs = NULL;\n\nI got confused reading this code. Are those different indices 'i' and\n'j' correct?\n\n~~~\n\n27. src/bin/psql/describe.c\n\nThe Row-Filter [1] displays filter information not only for the psql\n\\dRp+ command but also for the psql \\d <tablename> command. Perhaps\nthe Col-Filter patch should do that too.\n\n~~~\n\n28. src/bin/psql/tab-complete.c\n\n@@ -1657,6 +1657,8 @@ psql_completion(const char *text, int start, int end)\n /* ALTER PUBLICATION <name> ADD */\n else if (Matches(\"ALTER\", \"PUBLICATION\", MatchAny, \"ADD\"))\n COMPLETE_WITH(\"ALL TABLES IN SCHEMA\", \"TABLE\");\n+ else if (Matches(\"ALTER\", \"PUBLICATION\", MatchAny, \"ADD\", \"TABLE\"))\n+ COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);\n /* ALTER PUBLICATION <name> DROP */\n\nI am not sure about this one- is that change even related to the\nCol-Filter patch or is this some unrelated bugfix?\n\n~~~\n\n29. 
src/include/catalog/pg_publication.h\n\n@@ -86,6 +86,7 @@ typedef struct Publication\n typedef struct PublicationRelInfo\n {\n Relation relation;\n+ List *columns;\n } PublicationRelInfo;\n\nPerhaps that needs some comment. e.g. do you need to mention that a\nNIL List means all columns?\n\n~~~\n\n30. src/include/nodes/parsenodes.h\n\n@@ -3642,6 +3642,7 @@ typedef struct PublicationTable\n {\n NodeTag type;\n RangeVar *relation; /* relation to be published */\n+ List *columns; /* List of columns in a publication table */\n } PublicationTable;\n\n\nThat comment \"List of columns in a publication table\" doesn't really\nsay anything helpful.\n\nPerhaps it should mention that a NIL List means all table columns?\n\n~~~\n\n31. src/test/regress/sql/publication.sql\n\nThe regression test file has an uncommon mixture of /* */ and -- style comments.\n\nPerhaps change all the /* */ ones?\n\n~~~\n\n32. src/test/regress/sql/publication.sql\n\n+CREATE TABLE testpub_tbl5 (a int PRIMARY KEY, b text, c text,\n+ d int generated always as (a + length(b)) stored);\n+ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, x); -- error\n+ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c); -- error\n+ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, d); -- error\n+ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c); -- ok\n\nFor all these tests (and more) there seems not sufficient explanation\ncomments to say exactly what each test case is testing, e.g. *why* is\nan \"error\" expected for some cases but \"ok\" for others.\n\n~~~\n\n33. src/test/regress/sql/publication.sql\n\n\"-- no dice\"\n\n(??) confusing comment.\n\n~~~\n\n34. 
src/test/subscription/t/028_column_list.pl\n\nI think a few more comments in this TAP file would help to make the\npurpose of the tests more clear.\n\n------\n[1] https://www.postgresql.org/message-id/flat/CAHut%2BPtNWXPba0h%3Ddo_UiwaEziePNr7Z%2B58%2B-ctpyP2Pq1VkPw%40mail.gmail.com#76afd191811cba236198f62e60f44ade\n\nKind Regards,\nPeter Smith.\nFujitsu Australia\n\n\n", "msg_date": "Fri, 28 Jan 2022 19:39:41 +1100", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 12.01.22 01:41, Alvaro Herrera wrote:\n> I discovered a big hole in this, which is that ALTER PUBLICATION SET\n> (publish='insert,update') can add UPDATE publishing to a publication\n> that was only publishing INSERTs. It's easy to implement a fix: in\n> AlterPublicationOptions, scan the list of tables and raise an error if\n> any of them has a column list that doesn't include all the columns in\n> the replica identity.\n\nRight now, we are not checking the publication options and the replica \nidentity combinations at all at DDL time. This is only checked at \nexecution time in CheckCmdReplicaIdentity(). So under that scheme I \ndon't think the check you describe is actually necessary. Let the user \nset whatever combination they want, and check at execution time if it's \nan UPDATE or DELETE command whether the replica identity is sufficient.\n\n\n", "msg_date": "Mon, 31 Jan 2022 08:13:47 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\n\nHere's an updated version of the patch, rebased to current master. Parts \n0002 and 0003 include various improvements based on review by me and \nanother one by Peter Smith [1].\n\nPart 0003 reworks and significantly extends the TAP test, to exercise \nvarious cases related to changes of replica identity etc. 
discussed in \nthis thread. Some of the tests however still fail, because the behavior \nwas not updated - I'll work on that once we agree what the expected \nbehavior is.\n\n1) partitioning with pubviaroot=true\n\nThe main set of failures is related to partitions with different replica \nidentities and (pubviaroot=true), some of which may be mismatching the \ncolumn list. There are multiple such test cases, depending on how the \ninconsistency is introduced - it may be there from the beginning, the \ncolumn filter may be modified after adding the partitioned table to the \npublication, etc.\n\nI think the expected behavior is to prohibit such cases from happening, \nby cross-checking the column filter when adding the partitioned table to \npublication, attaching a partition or changing a column filter.\n\n\n2) merging multiple column filters\n\nWhen the table has multiple column filters (in different publications), \nwe need to merge them. Which works, except that FOR ALL TABLES [IN \nSCHEMA] needs to be handled as \"has no column filter\" (and replicates \neverything).\n\n\n3) partitioning with pubivaroot=false\n\nWhen a partitioned table is added with (pubviaroot=false), it should not \nbe subject to column filter on the parent relation, which is the same \nbehavior used by the row filtering patch.\n\n\nregards\n\n\n[1] \nhttps://www.postgresql.org/message-id/CAHut%2BPtc7Rh187eQKrxdUmUNWyfxz7OkhYAX%3DAW411Qwxya0LQ%40mail.gmail.com\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Wed, 16 Feb 2022 00:33:37 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi Peter,\n\nThanks for the review and sorry for taking so long.\n\nI've addressed most of the comments in the patch I sent a couple minutes \nago. 
More comments in-line:\n\n\nOn 1/28/22 09:39, Peter Smith wrote:\n> Here are some review comments for the v17-0001 patch.\n> \n> ~~~\n> \n> 1. Commit message\n> \n> If no column list is specified, all the columns are replicated, as\n> previously\n> \n> Missing period (.) at the end of that sentence.\n> \n\nI plan to reword that anyway.\n\n> ~~~\n> \n> 2. doc/src/sgml/catalogs.sgml\n> \n> + <para>\n> + This is an array of values that indicates which table columns are\n> + part of the publication. For example a value of <literal>1 3</literal>\n> + would mean that the first and the third table columns are published.\n> + A null value indicates that all attributes are published.\n> + </para></entry>\n> \n> Missing comma:\n> \"For example\" --> \"For example,\"\n> \n\nFixed.\n\n> Terms:\n> The text seems to jump between \"columns\" and \"attributes\". Perhaps,\n> for consistency, that last sentence should say: \"A null value\n> indicates that all columns are published.\"\n> \n\nYeah, but that's a pre-existing problem. I've modified the parts added \nby the patch to use \"columns\" though.\n\n> ~~~\n> \n> 3. doc/src/sgml/protocol.sgml\n> \n> </variablelist>\n> - Next, the following message part appears for each column\n> (except generated columns):\n> + Next, the following message part appears for each column (except\n> + generated columns and other columns that don't appear in the column\n> + filter list, for tables that have one):\n> <variablelist>\n> \n> Perhaps that can be expressed more simply, like:\n> \n> Next, the following message part appears for each column (except\n> generated columns and other columns not present in the optional column\n> filter list):\n> \n\nNot sure. I'll think about it.\n\n> ~~~\n> \n> 4. 
doc/src/sgml/ref/alter_publication.sgml\n> \n> +ALTER PUBLICATION <replaceable class=\"parameter\">name</replaceable>\n> ALTER TABLE <replaceable\n> class=\"parameter\">publication_object</replaceable> SET COLUMNS { (\n> <replaceable class=\"parameter\">name</replaceable> [, ...] ) | ALL }\n> \n> The syntax chart looks strange because there is already a \"TABLE\" and\n> a column_name list within the \"publication_object\" definition, so do\n> ALTER TABLE and publication_object co-exist?\n> According to the current documentation it suggests nonsense like below is valid:\n> ALTER PUBLICATION mypublication ALTER TABLE TABLE t1 (a,b,c) SET\n> COLUMNS (a,b,c);\n> \n\nYeah, I think that's wrong. I think \"publication_object\" is wrong in \nthis place, so I've used \"table_name\".\n\n> --\n> \n> But more fundamentally, I don't see why any new syntax is even needed at all.\n> \n> Instead of:\n> ALTER PUBLICATION mypublication ALTER TABLE users SET COLUMNS\n> (user_id, firstname, lastname);\n> Why not just:\n> ALTER PUBLICATION mypublication ALTER TABLE users (user_id, firstname,\n> lastname);\n> \n\nI haven't modified the grammar yet, but I agree SET COLUMNS seems a bit \nunnecessary. It also seems a bit inconsistent with ADD TABLE which \nsimply lists the columns right adter the table name.\n\n> Then, if the altered table defines a *different* column list then it\n> would be functionally equivalent to whatever your SET COLUMNS is doing\n> now. AFAIK this is how the Row-Filter [1] works, so that altering an\n> existing table to have a different Row-Filter just overwrites that\n> table's filter. IMO the Col-Filter behaviour should work the same as\n> that - \"SET COLUMNS\" is redundant.\n> \n\nI'm sorry, I don't understand what this is saying :-(\n\n> ~~~\n> \n> 5. doc/src/sgml/ref/alter_publication.sgml\n> \n> - TABLE [ ONLY ] <replaceable\n> class=\"parameter\">table_name</replaceable> [ * ] [, ... 
]\n> + TABLE [ ONLY ] <replaceable\n> class=\"parameter\">table_name</replaceable> [ * ] [ ( <replaceable\n> class=\"parameter\">column_name</replaceable>, [, ... ] ) ] [, ... ]\n> \n> That extra comma after the \"column_name\" seems wrong because there is\n> one already in \"[, ... ]\".\n> \n\nFixed.\n\n> ~~~\n> \n> 6. doc/src/sgml/ref/create_publication.sgml\n> \n> - TABLE [ ONLY ] <replaceable\n> class=\"parameter\">table_name</replaceable> [ * ] [, ... ]\n> + TABLE [ ONLY ] <replaceable\n> class=\"parameter\">table_name</replaceable> [ * ] [ ( <replaceable\n> class=\"parameter\">column_name</replaceable>, [, ... ] ) ] [, ... ]\n> \n> (Same as comment #5).\n> That extra comma after the \"column_name\" seems wrong because there is\n> one already in \"[, ... ]\".\n> \n\nFixed.\n\n> ~~~\n> \n> 7. doc/src/sgml/ref/create_publication.sgml\n> \n> + <para>\n> + When a column list is specified, only the listed columns are replicated;\n> + any other columns are ignored for the purpose of replication through\n> + this publication. If no column list is specified, all columns of the\n> + table are replicated through this publication, including any columns\n> + added later. If a column list is specified, it must include the replica\n> + identity columns.\n> + </para>\n> \n> Suggest to re-word this a bit simpler:\n> \n> e.g.\n> - \"listed columns\" --> \"named columns\"\n> - I don't think it is necessary to say the unlisted columns are ignored.\n> - I didn't think it is necessary to say \"though this publication\"\n> \n> AFTER\n> When a column list is specified, only the named columns are replicated.\n> If no column list is specified, all columns of the table are replicated,\n> including any columns added later. If a column list is specified, it must\n> include the replica identity columns.\n> \n\nFixed, seems reasonable.\n\n> ~~~\n> \n> 8. 
doc/src/sgml/ref/create_publication.sgml\n> \n> Consider adding another example showing a CREATE PUBLICATION which has\n> a column list.\n> \n\nAdded.\n\n> ~~~\n> \n> 9. src/backend/catalog/pg_publication.c - check_publication_add_relation\n> \n> /*\n> - * Check if relation can be in given publication and throws appropriate\n> - * error if not.\n> + * Check if relation can be in given publication and that the column\n> + * filter is sensible, and throws appropriate error if not.\n> + *\n> + * targetcols is the bitmapset of attribute numbers given in the column list,\n> + * or NULL if it was not specified.\n> */\n> \n> Typo: \"targetcols\" --> \"columns\" ??\n> \n\nRight, I noticed that too.\n\n> ~~~\n> \n> 10. src/backend/catalog/pg_publication.c - check_publication_add_relation\n> \n> +\n> + /* Make sure the column list checks out */\n> + if (columns != NULL)\n> + {\n> \n> Perhaps \"checks out\" could be worded better.\n> \n\nRight, I expanded that in my review.\n\n> ~~~\n> \n> 11. src/backend/catalog/pg_publication.c - check_publication_add_relation\n> \n> + /* Make sure the column list checks out */\n> + if (columns != NULL)\n> + {\n> + /*\n> + * Even if the user listed all columns in the column list, we cannot\n> + * allow a column list to be specified when REPLICA IDENTITY is FULL;\n> + * that would cause problems if a new column is added later, because\n> + * the new column would have to be included (because of being part of\n> + * the replica identity) but it's technically not allowed (because of\n> + * not being in the publication's column list yet). 
So reject this\n> + * case altogether.\n> + */\n> + if (replidentfull)\n> + ereport(ERROR,\n> + errcode(ERRCODE_FEATURE_NOT_SUPPORTED),\n> + errmsg(\"invalid column list for publishing relation \\\"%s\\\"\",\n> + RelationGetRelationName(targetrel)),\n> + errdetail(\"Cannot specify a column list on relations with REPLICA\n> IDENTITY FULL.\"));\n> +\n> + check_publication_columns(pub, targetrel, columns);\n> + }\n> \n> IIUC almost all of the above comment and code is redundant because by\n> calling the check_publication_columns function it will do exactly the\n> same check...\n> \n> So, that entire slab might be replaced by 2 lines:\n> \n> if (columns != NULL)\n> check_publication_columns(pub, targetrel, columns);\n> \n\nYou're right. But I think we can make that even simpler by moving even \nthe (columns!=NULL) check into the function.\n\n> ~~~\n> \n> 12. src/backend/catalog/pg_publication.c - publication_set_table_columns\n> \n> +publication_set_table_columns(Relation pubrel, HeapTuple pubreltup,\n> + Relation targetrel, List *columns)\n> +{\n> + Bitmapset *attset;\n> + AttrNumber *attarray;\n> + HeapTuple copytup;\n> + int natts;\n> + bool nulls[Natts_pg_publication_rel];\n> + bool replaces[Natts_pg_publication_rel];\n> + Datum values[Natts_pg_publication_rel];\n> +\n> + memset(values, 0, sizeof(values));\n> + memset(nulls, 0, sizeof(nulls));\n> + memset(replaces, false, sizeof(replaces));\n> \n> It seemed curious to use memset false for \"replaces\" but memset 0 for\n> \"nulls\", since they are both bool arrays (??)\n> \n\nFixed.\n\n> ~~~\n> \n> 13. src/backend/catalog/pg_publication.c - compare_int16\n> \n> +/* qsort comparator for attnums */\n> +static int\n> +compare_int16(const void *a, const void *b)\n> +{\n> + int av = *(const int16 *) a;\n> + int bv = *(const int16 *) b;\n> +\n> + /* this can't overflow if int is wider than int16 */\n> + return (av - bv);\n> +}\n> \n> This comparator seems common with another one already in the PG\n> source. 
Perhaps it would be better for generic comparators (like this\n> one) to be in some common code instead of scattered cut/paste copies\n> of the same thing.\n> \n\nI thought about it, but it doesn't really seem worth the effort.\n\n> ~~~\n> \n> 14. src/backend/commands/publicationcmds.c - AlterPublicationTables\n> \n> + else if (stmt->action == AP_SetColumns)\n> + {\n> + Assert(schemaidlist == NIL);\n> + Assert(list_length(tables) == 1);\n> +\n> + PublicationSetColumns(stmt, pubform,\n> + linitial_node(PublicationTable, tables));\n> + }\n> \n> (Same as my earlier review comment #4)\n> \n> Suggest to call this PublicationSetColumns based on some smarter\n> detection logic of a changed column list. Please refer to the\n> Row-Filter patch [1] for this same function.\n> \n\nI don't understand. Comment #4 is about syntax, no?\n\n> ~~~\n> \n> 15. src/backend/commands/publicationcmds.c - AlterPublicationTables\n> \n> + /* This is not needed to delete a table */\n> + pubrel->columns = NIL;\n> \n> Perhaps a more explanatory comment would be better there?\n> \n\nIf I understand the comment, it says we don't actually need to set \ncolumns to NIL. In which case we can just get rid of the change.\n\n> ~~~\n> \n> 16. src/backend/commands/tablecmds.c - relation_mark_replica_identity\n> \n> @@ -15841,6 +15871,7 @@ relation_mark_replica_identity(Relation rel,\n> char ri_type, Oid indexOid,\n> CatalogTupleUpdate(pg_index, &pg_index_tuple->t_self, pg_index_tuple);\n> InvokeObjectPostAlterHookArg(IndexRelationId, thisIndexOid, 0,\n> InvalidOid, is_internal);\n> +\n> /*\n> * Invalidate the relcache for the table, so that after we commit\n> * all sessions will refresh the table's replica identity index\n> \n> Spurious whitespace change seemed unrelated to the Col-Filter patch.\n> \n\nFixed.\n\n> ~~~\n> \n> 17. 
src/backend/parser/gram.y\n> \n> *\n> + * ALTER PUBLICATION name SET COLUMNS table_name (column[, ...])\n> + * ALTER PUBLICATION name SET COLUMNS table_name ALL\n> + *\n> \n> (Same as my earlier review comment #4)\n> \n> IMO there was no need for the new syntax of SET COLUMNS.\n> \n\nNot modified yet, we'll see about the syntax.\n\n> ~~~\n> \n> 18. src/backend/replication/logical/proto.c - logicalrep_write_attrs\n> \n> - /* send number of live attributes */\n> - for (i = 0; i < desc->natts; i++)\n> - {\n> - if (TupleDescAttr(desc, i)->attisdropped || TupleDescAttr(desc,\n> i)->attgenerated)\n> - continue;\n> - nliveatts++;\n> - }\n> - pq_sendint16(out, nliveatts);\n> -\n> /* fetch bitmap of REPLICATION IDENTITY attributes */\n> replidentfull = (rel->rd_rel->relreplident == REPLICA_IDENTITY_FULL);\n> if (!replidentfull)\n> idattrs = RelationGetIdentityKeyBitmap(rel);\n> \n> + /* send number of live attributes */\n> + for (i = 0; i < desc->natts; i++)\n> + {\n> + Form_pg_attribute att = TupleDescAttr(desc, i);\n> +\n> + if (att->attisdropped || att->attgenerated)\n> + continue;\n> + if (columns != NULL && !bms_is_member(att->attnum, columns))\n> + continue;\n> + nliveatts++;\n> + }\n> + pq_sendint16(out, nliveatts);\n> +\n> \n> This change seemed to have the effect of moving that 4 lines of\n> \"replidentfull\" code from below the loop to above the loop. But moving\n> that code seems unrelated to the Col-Filter patch. (??).\n> \n\nRight, restored the original code.\n\n> ~~~\n> \n> 19. src/backend/replication/logical/tablesync.c - fetch_remote_table_info\n> \n> @@ -793,12 +877,12 @@ fetch_remote_table_info(char *nspname, char *relname,\n> \n> ExecClearTuple(slot);\n> }\n> +\n> ExecDropSingleTupleTableSlot(slot);\n> -\n> - lrel->natts = natt;\n> -\n> walrcv_clear_result(res);\n> pfree(cmd.data);\n> +\n> + lrel->natts = natt;\n> }\n> \n> The shuffling of those few lines seems unrelated to any requirement of\n> the Col-Filter patch (??)\n> \n\nYep, undone. 
I'd bet this is simply due to older versions of the patch \ntouching this place, and then undoing some of it.\n\n> ~~~\n> \n> 20. src/backend/replication/logical/tablesync.c - copy_table\n> \n> + for (int i = 0; i < lrel.natts; i++)\n> + {\n> + appendStringInfoString(&cmd, quote_identifier(lrel.attnames[i]));\n> + if (i < lrel.natts - 1)\n> + appendStringInfoString(&cmd, \", \");\n> + }\n> \n> Perhaps that could be expressed more simply if the other way around like:\n> \n> for (int i = 0; i < lrel.natts; i++)\n> {\n> if (i)\n> appendStringInfoString(&cmd, \", \");\n> appendStringInfoString(&cmd, quote_identifier(lrel.attnames[i]));\n> }\n> \n\nI used a slightly different version.\n\n> ~~~\n> \n> 21. src/backend/replication/pgoutput/pgoutput.c\n> \n> +\n> + /*\n> + * Set of columns included in the publication, or NULL if all columns are\n> + * included implicitly. Note that the attnums in this list are not\n> + * shifted by FirstLowInvalidHeapAttributeNumber.\n> + */\n> + Bitmapset *columns;\n> \n> Typo: \"in this list\" --> \"in this set\" (??)\n> \n\n\"bitmap\" is what we call Bitmapset so I used that.\n\n> ~~~\n> \n> 22. src/backend/replication/pgoutput/pgoutput.c - get_rel_sync_entry\n> \n> * Don't publish changes for partitioned tables, because\n> - * publishing those of its partitions suffices, unless partition\n> - * changes won't be published due to pubviaroot being set.\n> + * publishing those of its partitions suffices. (However, ignore\n> + * this if partition changes are not to published due to\n> + * pubviaroot being set.)\n> */\n> \n> This change seems unrelated to the Col-Filter patch, so perhaps it\n> should not be here at all.\n> \n> Also, typo: \"are not to published\"\n> \n\nYeah, unrelated. Reverted.\n\n> ~~~\n> \n> 23. src/backend/replication/pgoutput/pgoutput.c - get_rel_sync_entry\n> \n> + /*\n> + * Obtain columns published by this publication, and add them\n> + * to the list for this rel. 
Note that if at least one\n> + * publication has a empty column list, that means to publish\n> + * everything; so if we saw a publication that includes all\n> + * columns, skip this.\n> + */\n> \n> Typo: \"a empty\" --> \"an empty\"\n> \n\nFixed.\n\n> ~~~\n> \n> 24. src/backend/replication/pgoutput/pgoutput.c - get_rel_sync_entry\n> \n> + if (isnull)\n> + {\n> + /*\n> + * If we see a publication with no columns, reset the\n> + * list and ignore further ones.\n> + */\n> \n> Perhaps that comment is meant to say \"with no column filter\" instead\n> of \"with no columns\"?\n> \n\nYep, fixed.\n\n> ~~~\n> \n> 25. src/backend/replication/pgoutput/pgoutput.c - get_rel_sync_entry\n> \n> + if (isnull)\n> + {\n> ...\n> + }\n> + else if (!isnull)\n> + {\n> ...\n> + }\n> \n> Is the \"if (!isnull)\" in the else just to be really REALLY sure it is not null?\n> \n\nDouble-tap ;-) Removed the condition.\n\n> ~~~\n> \n> 26. src/bin/pg_dump/pg_dump.c - getPublicationTables\n> \n> + pubrinfo[i].pubrattrs = attribs->data;\n> + }\n> + else\n> + pubrinfo[j].pubrattrs = NULL;\n> \n> I got confused reading this code. Are those different indices 'i' and\n> 'j' correct?\n> \n\nGood catch! I think you're right and it should be \"j\" in both places. \nThis'd only cause trouble in selective pg_dumps (when dumping selected \ntables). The patch clearly needs some pg_dump tests.\n\n> ~~~\n> \n> 27. src/bin/psql/describe.c\n> \n> The Row-Filter [1] displays filter information not only for the psql\n> \\dRp+ command but also for the psql \\d <tablename> command. Perhaps\n> the Col-Filter patch should do that too.\n> \n\nNot sure.\n\n> ~~~\n> \n> 28. 
src/bin/psql/tab-complete.c\n> \n> @@ -1657,6 +1657,8 @@ psql_completion(const char *text, int start, int end)\n> /* ALTER PUBLICATION <name> ADD */\n> else if (Matches(\"ALTER\", \"PUBLICATION\", MatchAny, \"ADD\"))\n> COMPLETE_WITH(\"ALL TABLES IN SCHEMA\", \"TABLE\");\n> + else if (Matches(\"ALTER\", \"PUBLICATION\", MatchAny, \"ADD\", \"TABLE\"))\n> + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);\n> /* ALTER PUBLICATION <name> DROP */\n> \n> I am not sure about this one- is that change even related to the\n> Col-Filter patch or is this some unrelated bugfix?\n> \n\nYeah, seems unrelated - possibly from a rebase or something. Removed.\n\n> ~~~\n> \n> 29. src/include/catalog/pg_publication.h\n> \n> @@ -86,6 +86,7 @@ typedef struct Publication\n> typedef struct PublicationRelInfo\n> {\n> Relation relation;\n> + List *columns;\n> } PublicationRelInfo;\n> \n> Perhaps that needs some comment. e.g. do you need to mention that a\n> NIL List means all columns?\n> \n\nI added a short comment.\n\n> ~~~\n> \n> 30. src/include/nodes/parsenodes.h\n> \n> @@ -3642,6 +3642,7 @@ typedef struct PublicationTable\n> {\n> NodeTag type;\n> RangeVar *relation; /* relation to be published */\n> + List *columns; /* List of columns in a publication table */\n> } PublicationTable;\n> \n> \n> That comment \"List of columns in a publication table\" doesn't really\n> say anything helpful.\n> \n> Perhaps it should mention that a NIL List means all table columns?\n> \n\nNot sure, seems fine.\n\n> ~~~\n> \n> 31. src/test/regress/sql/publication.sql\n> \n> The regression test file has an uncommon mixture of /* */ and -- style comments.\n> \n> Perhaps change all the /* */ ones?\n> \n\nYeah, that needs some cleanup. I haven't done anything about it yet.\n\n> ~~~\n> \n> 32. 
src/test/regress/sql/publication.sql\n> \n> +CREATE TABLE testpub_tbl5 (a int PRIMARY KEY, b text, c text,\n> + d int generated always as (a + length(b)) stored);\n> +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, x); -- error\n> +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c); -- error\n> +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, d); -- error\n> +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (a, c); -- ok\n> \n> For all these tests (and more) there seems not sufficient explanation\n> comments to say exactly what each test case is testing, e.g. *why* is\n> an \"error\" expected for some cases but \"ok\" for others.\n> \n\nNot sure. I think the error is generally obvious in the expected output.\n\n> ~~~\n> \n> 33. src/test/regress/sql/publication.sql\n> \n> \"-- no dice\"\n> \n> (??) confusing comment.\n> \n\nSame as for the errors.\n\n> ~~~\n> \n> 34. src/test/subscription/t/028_column_list.pl\n> \n> I think a few more comments in this TAP file would help to make the\n> purpose of the tests more clear.\n> \n\nYeah, the 0004 patch I shared a couple minutes ago does exactly that.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Wed, 16 Feb 2022 00:57:31 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Feb-16, Tomas Vondra wrote:\n\n> Here's an updated version of the patch, rebased to current master. 
Parts\n> 0002 and 0003 include various improvements based on review by me and another\n> one by Peter Smith [1].\n\nThanks for doing this!\n\n> 1) partitioning with pubviaroot=true\n\nI agree that preventing the inconsistencies from happening is probably\nthe best.\n\n> 2) merging multiple column filters\n> \n> When the table has multiple column filters (in different publications), we\n> need to merge them. Which works, except that FOR ALL TABLES [IN SCHEMA]\n> needs to be handled as \"has no column filter\" (and replicates everything).\n\nAgreed.\n\n> 3) partitioning with pubivaroot=false\n> \n> When a partitioned table is added with (pubviaroot=false), it should not be\n> subject to column filter on the parent relation, which is the same behavior\n> used by the row filtering patch.\n\nYou mean each partition should define its own filter, or lack of filter?\nThat sounds reasonable.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"Pensar que el espectro que vemos es ilusorio no lo despoja de espanto,\nsólo le suma el nuevo terror de la locura\" (Perelandra, C.S. Lewis)\n\n\n", "msg_date": "Tue, 15 Feb 2022 21:33:32 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 2/16/22 01:33, Alvaro Herrera wrote:\n> On 2022-Feb-16, Tomas Vondra wrote:\n> \n>> Here's an updated version of the patch, rebased to current master. Parts\n>> 0002 and 0003 include various improvements based on review by me and another\n>> one by Peter Smith [1].\n> \n> Thanks for doing this!\n> \n>> 1) partitioning with pubviaroot=true\n> \n> I agree that preventing the inconsistencies from happening is probably\n> the best.\n> \n>> 2) merging multiple column filters\n>>\n>> When the table has multiple column filters (in different publications), we\n>> need to merge them. 
Which works, except that FOR ALL TABLES [IN SCHEMA]\n>> needs to be handled as \"has no column filter\" (and replicates everything).\n> \n> Agreed.\n> \n>> 3) partitioning with pubivaroot=false\n>>\n>> When a partitioned table is added with (pubviaroot=false), it should not be\n>> subject to column filter on the parent relation, which is the same behavior\n>> used by the row filtering patch.\n> \n> You mean each partition should define its own filter, or lack of filter?\n> That sounds reasonable.\n> \n\nIf the partition is not published by the root, it shouldn't use the\nfilter defined on the root. I wonder what should happen to the filter\ndefined on the partition itself. I'd say\n\npubviaroot=false -> use filter defined on partition (if any)\n\npubviaroot=true -> use filter defined on root (if any)\n\n\nI wonder what the row filter patch is doing - we should probably follow\nthe same logic, if only to keep the filtering stuff consistent.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Wed, 16 Feb 2022 01:39:37 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Feb 16, 2022 at 6:09 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 2/16/22 01:33, Alvaro Herrera wrote:\n> >\n> >> 3) partitioning with pubivaroot=false\n> >>\n> >> When a partitioned table is added with (pubviaroot=false), it should not be\n> >> subject to column filter on the parent relation, which is the same behavior\n> >> used by the row filtering patch.\n> >\n> > You mean each partition should define its own filter, or lack of filter?\n> > That sounds reasonable.\n> >\n>\n> If the partition is not published by the root, it shouldn't use the\n> filter defined on the root. I wonder what should happen to the filter\n> defined on the partition itself. 
I'd say\n>\n> pubviaroot=false -> use filter defined on partition (if any)\n>\n> pubviaroot=true -> use filter defined on root (if any)\n>\n>\n> I wonder what the row filter patch is doing - we should probably follow\n> the same logic, if only to keep the filtering stuff consistent.\n>\n\nThe row filter patch is doing the same and additionally, it gives an\nerror if the user provides a filter for a partitioned table with\npubviaroot as false.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 16 Feb 2022 15:00:29 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Feb 16, 2022 at 5:03 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> Hi,\n>\n> Here's an updated version of the patch, rebased to current master. Parts\n> 0002 and 0003 include various improvements based on review by me and\n> another one by Peter Smith [1].\n>\n> Part 0003 reworks and significantly extends the TAP test, to exercise\n> various cases related to changes of replica identity etc. discussed in\n> this thread. Some of the tests however still fail, because the behavior\n> was not updated - I'll work on that once we agree what the expected\n> behavior is.\n>\n> 1) partitioning with pubviaroot=true\n>\n> The main set of failures is related to partitions with different replica\n> identities and (pubviaroot=true), some of which may be mismatching the\n> column list. 
There are multiple such test cases, depending on how the\n> inconsistency is introduced - it may be there from the beginning, the\n> column filter may be modified after adding the partitioned table to the\n> publication, etc.\n>\n> I think the expected behavior is to prohibit such cases from happening,\n> by cross-checking the column filter when adding the partitioned table to\n> publication, attaching a partition or changing a column filter.\n>\n\nI feel it is better to follow the way described by Peter E. here [1]\nto handle these cases. The row filter patch is also using the same\nscheme as that is what we are doing now for Updates/Deletes and it\nwould be really challenging and much more effort/code to deal with\neverything at DDL time. I have tried to explain some of that in my\nemails [2][3].\n\n[1] - https://www.postgresql.org/message-id/ca91dc91-80ba-e954-213e-b4170a6160f5%40enterprisedb.com\n[2] - https://www.postgresql.org/message-id/CAA4eK1%2Bm45Xyzx7AUY9TyFnB6CZ7_%2B_uooPb7WHSpp7UE%3DYmKg%40mail.gmail.com\n[3] - https://www.postgresql.org/message-id/CAA4eK1%2B1DMkCip9SB3B0_u0Q6fGf-D3vgqQodkLfur0qkL482g%40mail.gmail.com\n\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 16 Feb 2022 15:35:34 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\n\nAttached is an updated patch, addressing most of the issues reported so\nfar. There are various minor tweaks, but the main changes are:\n\n1) regular regression tests, verifying (hopefully) all the various cases\nof publication vs. column filters, replica identity check at various\nchanges and so on\n\n2) pg_dump tests, testing column filters (alone and with row filter)\n\n3) checks of column filter vs. 
publish_via_partition_root and replica\nidentity, following the same logic as the row-filter patch (hopefully,\nit touches the same places, using the same logic, ...)\n\nThat means - with \"publish_via_partition_root=false\" it's not allowed to\nspecify column filters on partitioned tables, only for leaf partitions.\n\nAnd we check column filter vs. replica identity when adding tables to\npublications, or whenever we change the replica identity.\n\n\nThe patch is still a bit crude, I'm sure some of the places (especially\nthe new ones) may need cleanup/recovery. But I think it's much closer to\nbeing committable, I think.\n\nThe first two simple patches are adding tests for the row filtering. So\nthis is not really part of this patch.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Wed, 2 Mar 2022 13:13:15 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "I applied this patch in my branch with CI hacks to show code coverage on\ncirrus.\nhttps://api.cirrus-ci.com/v1/artifact/task/6186186539532288/coverage/coverage/00-index.html\n\nEyeballing it looks good. But GetActionsInPublication() isn't being hit at\nall?\n\nI think the queries in pg_dump should be written with the common portions of\nthe query outside the conditional.\n\n-- \nJustin\n\n\n", "msg_date": "Wed, 2 Mar 2022 18:29:21 -0600", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Mar 2, 2022 at 5:43 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> Attached is an updated patch, addressing most of the issues reported so\n> far. There are various minor tweaks, but the main changes are:\n...\n>\n> 3) checks of column filter vs. 
publish_via_partition_root and replica\n> identity, following the same logic as the row-filter patch (hopefully,\n> it touches the same places, using the same logic, ...)\n>\n> That means - with \"publish_via_partition_root=false\" it's not allowed to\n> specify column filters on partitioned tables, only for leaf partitions.\n>\n> And we check column filter vs. replica identity when adding tables to\n> publications, or whenever we change the replica identity.\n>\n\nThis handling is different from row filter work and I see problems\nwith it. The column list validation w.r.t primary key (default replica\nidentity) is missing. The handling of column list vs. partitions has\nmultiple problems: (a) In attach partition, the patch is just checking\nancestors for RI validation but what if the table being attached has\nfurther subpartitions; (b) I think the current locking also seems to\nhave problems because it is quite possible that while it validates the\nancestors here, concurrently someone changes the column list. I think\nit won't be enough to just change the locking mode because with the\ncurrent patch strategy during attach, we will be first taking locks\nfor child tables of current partition and then parent tables which can\npose deadlock hazards.\n\nThe columns list validation also needs to be done when we change\npublication action.\n\nThere could be more similar problems which I might have missed. For\nsome of these (except for concurrency issues), my colleague Shi-San\nhas done testing and the results are below [1]. I feel we should do RI\nvs. column list handling similar to row filter work (at one place) to\navoid all such hazards and possibly similar handling at various\nplaces, there is a good chance that we will miss some places or make\nmistakes that are not easy to catch. 
Do let me know if you think it\nmakes sense for me or one of the people who work on row filter patch\nto try this (make the handling of RI checks similar to row filter\nwork) and then we can see if that turns out to be a simple way to deal\nwith all these problems?\n\nSome other miscellaneous comments:\n=============================\n*\nIn get_rel_sync_entry(), the handling for partitioned tables doesn't\nseem to be correct. It can publish a different set of columns based on\nthe order of publications specified in the subscription.\n\nFor example:\n----\ncreate table parent (a int, b int, c int) partition by range (a);\ncreate table test_part1 (like parent);\nalter table parent attach partition test_part1 for values from (1) to (10);\n\ncreate publication pub for table parent(a) with (PUBLISH_VIA_PARTITION_ROOT);\ncreate publication pub2 for table test_part1(b);\n---\n\nNow, depending on the order of publications in the list while defining\nsubscription, the column list will change\n----\ncreate subscription sub connection 'port=10000 dbname=postgres'\npublication pub, pub2;\n\nFor the above, column list will be: (a)\n\ncreate subscription sub connection 'port=10000 dbname=postgres'\npublication pub2, pub;\n\nFor this one, the column list will be: (a, b)\n----\n\nTo avoid this, the column list should be computed based on the final\npublish_as_relid as we are doing for the row filter.\n\n*\nFetching column filter info in tablesync.c is quite expensive. It\nseems to be using four round-trips to get the complete info whereas\nfor row-filter we use just one round trip. 
I think we should try to\nget both row filter and column filter info in just one round trip.\n\n[1] -\nTest-1:\nThe patch doesn't check when the primary key changes.\n\ne.g.\n-- publisher --\ncreate table tbl(a int primary key, b int);\ncreate publication pub for table tbl(a);\nalter table tbl drop CONSTRAINT tbl_pkey;\nalter table tbl add primary key (b);\ninsert into tbl values (1,1);\n\n-- subscriber --\ncreate table tbl(a int, b int);\ncreate subscription sub connection 'port=5432 dbname=postgres' publication pub;\nupdate tbl set b=1 where a=1;\nalter table tbl add primary key (b);\n\n-- publisher --\ndelete from tbl;\n\nColumn \"b\" is part of replica identity, but it is filtered, which\ncaused an error on the subscriber side.\n\nERROR: publisher did not send replica identity column expected by the\nlogical replication target relation \"public.tbl\"\nCONTEXT: processing remote data during \"DELETE\" for replication\ntarget relation \"public.tbl\" in transaction 724 at 2022-03-04\n11:46:16.330892+08\n\nTest-2: Partitioned table RI w.r.t column list.\n2.1\nUsing \"create table ... 
partition of\".\n\ne.g.\n-- publisher --\ncreate table parent (a int, b int) partition by range (a);\ncreate publication pub for table parent(a)\nwith(publish_via_partition_root=true);\ncreate table child partition of parent (primary key (a,b)) default;\ninsert into parent values (1,1);\n\n-- subscriber --\ncreate table parent (a int, b int) partition by range (a);\ncreate table child partition of parent default;\ncreate subscription sub connection 'port=5432 dbname=postgres'\npublication pub; update child set b=1 where a=1;\nalter table parent add primary key (a,b);\n\n-- publisher --\ndelete from parent;\n\nColumn \"b\" is part of replica identity in the child table, but it is\nfiltered, which caused an error on the subscriber side.\n\nERROR: publisher did not send replica identity column expected by the\nlogical replication target relation \"public.parent\"\nCONTEXT: processing remote data during \"DELETE\" for replication\ntarget relation \"public.parent\" in transaction 723 at 2022-03-04\n15:15:39.776949+08\n\n2.2\nIt is likely that a table to be attached also has a partition.\n\ne.g.\n-- publisher --\ncreate table t1 (a int, b int) partition by range (a);\ncreate publication pub for table t1(b) with(publish_via_partition_root=true);\ncreate table t2 (a int, b int) partition by range (a);\ncreate table t3 (a int primary key, b int);\nalter table t2 attach partition t3 default;\nalter table t1 attach partition t2 default;\ninsert into t1 values (1,1);\n\n-- subscriber --\ncreate table t1 (a int, b int) partition by range (a);\ncreate table t2 (a int, b int) partition by range (a);\ncreate table t3 (a int, b int);\nalter table t2 attach partition t3 default;\nalter table t1 attach partition t2 default;\ncreate subscription sub connection 'port=5432 dbname=postgres' publication pub;\nupdate t1 set a=1 where b=1;\nalter table t1 add primary key (a);\n\n-- publisher --\ndelete from t1;\n\nColumn \"a\" is part of replica identity in table t3, but t3's ancestor\nt1 
is published with column \"a\" filtered, which caused an error on the\nsubscriber side.\n\nERROR: publisher did not send replica identity column expected by the\nlogical replication target relation \"public.t1\"\nCONTEXT: processing remote data during \"DELETE\" for replication\ntarget relation \"public.t1\" in transaction 726 at 2022-03-04\n14:40:29.297392+08\n\n3.\nUsing \"alter publication pub set(publish='...'); \"\n\ne.g.\n-- publisher --\ncreate table tbl(a int primary key, b int); create publication pub for\ntable tbl(b) with(publish='insert'); insert into tbl values (1,1);\n\n-- subscriber --\ncreate table tbl(a int, b int);\ncreate subscription sub connection 'port=5432 dbname=postgres' publication pub;\n\n-- publisher --\nalter publication pub set(publish='insert,update');\n\n-- subscriber --\nupdate tbl set a=1 where b=1;\nalter table tbl add primary key (b);\n\n-- publisher --\nupdate tbl set a=2 where a=1;\n\nUpdates are replicated, and the column \"a\" is part of replica\nidentity, but it is filtered, which caused an error on the subscriber\nside.\n\nERROR: publisher did not send replica identity column expected by the\nlogical replication target relation \"public.tbl\"\nCONTEXT: processing remote data during \"UPDATE\" for replication\ntarget relation \"public.tbl\" in transaction 723 at 2022-03-04\n11:56:33.905843+08\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 4 Mar 2022 16:12:30 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/4/22 11:42, Amit Kapila wrote:\n> On Wed, Mar 2, 2022 at 5:43 PM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> Attached is an updated patch, addressing most of the issues reported so\n>> far. There are various minor tweaks, but the main changes are:\n> ...\n>>\n>> 3) checks of column filter vs. 
publish_via_partition_root and replica\n>> identity, following the same logic as the row-filter patch (hopefully,\n>> it touches the same places, using the same logic, ...)\n>>\n>> That means - with \"publish_via_partition_root=false\" it's not allowed to\n>> specify column filters on partitioned tables, only for leaf partitions.\n>>\n>> And we check column filter vs. replica identity when adding tables to\n>> publications, or whenever we change the replica identity.\n>>\n> \n> This handling is different from row filter work and I see problems\n> with it.\n\nBy different, I assume you mean I tried to enfoce the rules in ALTER\nPUBLICATION and other ALTER commands, instead of when modifying the\ndata? OK, I reworked this to do the same thing as the row filtering patch.\n\n> The column list validation w.r.t primary key (default replica\n> identity) is missing. The handling of column list vs. partitions has\n> multiple problems: (a) In attach partition, the patch is just checking\n> ancestors for RI validation but what if the table being attached has\n> further subpartitions; (b) I think the current locking also seems to\n> have problems because it is quite possible that while it validates the\n> ancestors here, concurrently someone changes the column list. I think\n> it won't be enough to just change the locking mode because with the\n> current patch strategy during attach, we will be first taking locks\n> for child tables of current partition and then parent tables which can\n> pose deadlock hazards.\n> > The columns list validation also needs to be done when we change\n> publication action.\n>\nI believe those issues should be solved by adopting the same approach as\nthe row-filtering patch, right?\n\n> There could be more similar problems which I might have missed. For\n> some of these (except for concurrency issues), my colleague Shi-San\n> has done testing and the results are below [1]. I feel we should do RI\n> vs. 
column list handling similar to row filter work (at one place) to\n> avoid all such hazards and possibly similar handling at various\n> places, there is a good chance that we will miss some places or make\n> mistakes that are not easy to catch.\n\nI agree if both patches use the same approach, that would reduce the\nrisk of missing the handling in one place, etc.\n\n> Do let me know if you think it makes sense for me or one of the \n> people who work on row filter patch to try this (make the handling of\n> RI checks similar to row filter work) and then we can see if that\n> turns out to be a simple way to deal with all these problems?\n> \n\nIf someone who is more familiar with the design conclusions from the row\nfiltering patch, that would be immensely useful. Especially now, when I\nreworked it to the same approach as the row filtering patch.\n\n\n> Some other miscellaneous comments:\n> =============================\n> *\n> In get_rel_sync_entry(), the handling for partitioned tables doesn't\n> seem to be correct. 
It can publish a different set of columns based on\n> the order of publications specified in the subscription.\n> \n> For example:\n> ----\n> create table parent (a int, b int, c int) partition by range (a);\n> create table test_part1 (like parent);\n> alter table parent attach partition test_part1 for values from (1) to (10);\n> \n> create publication pub for table parent(a) with (PUBLISH_VIA_PARTITION_ROOT);\n> create publication pub2 for table test_part1(b);\n> ---\n> \n> Now, depending on the order of publications in the list while defining\n> subscription, the column list will change\n> ----\n> create subscription sub connection 'port=10000 dbname=postgres'\n> publication pub, pub2;\n> \n> For the above, column list will be: (a)\n> \n> create subscription sub connection 'port=10000 dbname=postgres'\n> publication pub2, pub;\n> \n> For this one, the column list will be: (a, b)\n> ----\n> \n> To avoid this, the column list should be computed based on the final\n> publish_as_relid as we are doing for the row filter.\n> \n\nHmm, yeah. That seems like a genuine problem - it should not depend on\nthe order of publications in the subscription, I guess.\n\nBut is it an issue in the patch? Isn't that a pre-existing issue? AFAICS\nthe problem is that we initialize publish_as_relid=relid before the loop\nover publications, and then just update it. So the first iteration\nstarts with relid, but the second iteration ends with whatever value is\nset by the first iteration (e.g. the root).\n\nSo with the example you posted, we start with\n\n publish_as_relid = relid = test_part1\n\nbut then if the first publication is pubviaroot=true, we update it to\nparent. 
And in the second iteration, we fail to find the column filter,\nbecause \"parent\" (publish_as_relid) is not part of the pub2.\n\nIf we do it in the other order, we leave the publish_as_relid value as\nis (and find the filter), and then update it in the second iteration\n(and find the column filter too).\n\nNow, this can be resolved by re-calculating the publish_as_relid from\nscratch in each iteration (start with relid, then maybe update it). But\nthat's just half the story - the issue is there even without column\nfilters. Consider this example:\n\ncreate table t (a int, b int, c int) partition by range (a);\n\ncreate table t_1 partition of t for values from (1) to (10)\n partition by range (a);\n\ncreate table t_2 partition of t_1 for values from (1) to (10);\n\ncreate publication pub1 for table t(a)\n with (PUBLISH_VIA_PARTITION_ROOT);\n\ncreate publication pub2 for table t_1(a)\n with (PUBLISH_VIA_PARTITION_ROOT);\n\n\nNow, is you change subscribe to \"pub1, pub2\" and \"pub2, pub1\", we'll end\nup with different publish_as_relid values (t or t_1). Which seems like\nthe same ambiguity issue.\n\n\n> *\n> Fetching column filter info in tablesync.c is quite expensive. It\n> seems to be using four round-trips to get the complete info whereas\n> for row-filter we use just one round trip. I think we should try to\n> get both row filter and column filter info in just one round trip.\n> \n\nMaybe, but I really don't think this is an issue. 
The sync happens only\nvery rarely, and the rest of the sync (starting workers, copying data)\nis likely way more expensive than this.\n\n> [1] -\n> Test-1:\n> The patch doesn't check when the primary key changes.\n> \n> e.g.\n> -- publisher --\n> create table tbl(a int primary key, b int);\n> create publication pub for table tbl(a);\n> alter table tbl drop CONSTRAINT tbl_pkey;\n> alter table tbl add primary key (b);\n> insert into tbl values (1,1);\n> \n> -- subscriber --\n> create table tbl(a int, b int);\n> create subscription sub connection 'port=5432 dbname=postgres' publication pub;\n> update tbl set b=1 where a=1;\n> alter table tbl add primary key (b);\n> \n> -- publisher --\n> delete from tbl;\n> \n> Column \"b\" is part of replica identity, but it is filtered, which\n> caused an error on the subscriber side.\n> \n> ERROR: publisher did not send replica identity column expected by the\n> logical replication target relation \"public.tbl\"\n> CONTEXT: processing remote data during \"DELETE\" for replication\n> target relation \"public.tbl\" in transaction 724 at 2022-03-04\n> 11:46:16.330892+08\n> \n> Test-2: Partitioned table RI w.r.t column list.\n> 2.1\n> Using \"create table ... 
partition of\".\n> \n> e.g.\n> -- publisher --\n> create table parent (a int, b int) partition by range (a);\n> create publication pub for table parent(a)\n> with(publish_via_partition_root=true);\n> create table child partition of parent (primary key (a,b)) default;\n> insert into parent values (1,1);\n> \n> -- subscriber --\n> create table parent (a int, b int) partition by range (a);\n> create table child partition of parent default;\n> create subscription sub connection 'port=5432 dbname=postgres'\n> publication pub; update child set b=1 where a=1;\n> alter table parent add primary key (a,b);\n> \n> -- publisher --\n> delete from parent;\n> \n> Column \"b\" is part of replica identity in the child table, but it is\n> filtered, which caused an error on the subscriber side.\n> \n> ERROR: publisher did not send replica identity column expected by the\n> logical replication target relation \"public.parent\"\n> CONTEXT: processing remote data during \"DELETE\" for replication\n> target relation \"public.parent\" in transaction 723 at 2022-03-04\n> 15:15:39.776949+08\n> \n> 2.2\n> It is likely that a table to be attached also has a partition.\n> \n> e.g.\n> -- publisher --\n> create table t1 (a int, b int) partition by range (a);\n> create publication pub for table t1(b) with(publish_via_partition_root=true);\n> create table t2 (a int, b int) partition by range (a);\n> create table t3 (a int primary key, b int);\n> alter table t2 attach partition t3 default;\n> alter table t1 attach partition t2 default;\n> insert into t1 values (1,1);\n> \n> -- subscriber --\n> create table t1 (a int, b int) partition by range (a);\n> create table t2 (a int, b int) partition by range (a);\n> create table t3 (a int, b int);\n> alter table t2 attach partition t3 default;\n> alter table t1 attach partition t2 default;\n> create subscription sub connection 'port=5432 dbname=postgres' publication pub;\n> update t1 set a=1 where b=1;\n> alter table t1 add primary key (a);\n> \n> -- 
publisher --\n> delete from t1;\n> \n> Column \"a\" is part of replica identity in table t3, but t3's ancestor\n> t1 is published with column \"a\" filtered, which caused an error on the\n> subscriber side.\n> \n> ERROR: publisher did not send replica identity column expected by the\n> logical replication target relation \"public.t1\"\n> CONTEXT: processing remote data during \"DELETE\" for replication\n> target relation \"public.t1\" in transaction 726 at 2022-03-04\n> 14:40:29.297392+08\n> \n> 3.\n> Using \"alter publication pub set(publish='...'); \"\n> \n> e.g.\n> -- publisher --\n> create table tbl(a int primary key, b int); create publication pub for\n> table tbl(b) with(publish='insert'); insert into tbl values (1,1);\n> \n> -- subscriber --\n> create table tbl(a int, b int);\n> create subscription sub connection 'port=5432 dbname=postgres' publication pub;\n> \n> -- publisher --\n> alter publication pub set(publish='insert,update');\n> \n> -- subscriber --\n> update tbl set a=1 where b=1;\n> alter table tbl add primary key (b);\n> \n> -- publisher --\n> update tbl set a=2 where a=1;\n> \n> Updates are replicated, and the column \"a\" is part of replica\n> identity, but it is filtered, which caused an error on the subscriber\n> side.\n> \n> ERROR: publisher did not send replica identity column expected by the\n> logical replication target relation \"public.tbl\"\n> CONTEXT: processing remote data during \"UPDATE\" for replication\n> target relation \"public.tbl\" in transaction 723 at 2022-03-04\n> 11:56:33.905843+08\n> \n\nAFAICS these issues should be resolved by the adoption of the row-filter\napproach (i.e. 
it should fail the same way as for row filter).\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Mon, 7 Mar 2022 16:18:04 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 07.03.22 16:18, Tomas Vondra wrote:\n> AFAICS these issues should be resolved by the adoption of the row-filter\n> approach (i.e. it should fail the same way as for row filter).\n\nThe first two patches (additional testing for row filtering feature) \nlook okay to me.\n\nAttached is a fixup patch for your main feature patch (the third one).\n\nIt's a bit of code and documentation cleanup, but mainly I removed the \nterm \"column filter\" from the patch. Half the code was using \"column \nlist\" or similar and half the code \"column filter\", which was confusing. \n Also, there seemed to be a bit of copy-and-pasting from row-filter \ncode going on, with some code comments not quite sensible, so I rewrote \nsome of them. Also some code used \"rf\" and \"cf\" symbols which were a \nbit hard to tell apart. A few more letters can increase readability.\n\nNote in publicationcmds.c OpenTableList() the wrong if condition was used.\n\nI'm still confused about the intended replica identity handling. This \npatch still checks whether the column list contains the replica identity \nat DDL time. And then it also checks at execution time. I thought the \nlatest understanding was that the DDL-time checking would be removed. 
I \nthink it's basically useless now, since as the test cases show, you can \nsubvert those checks by altering the replica identity later.", "msg_date": "Wed, 9 Mar 2022 10:20:57 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Mar 7, 2022 at 8:48 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/4/22 11:42, Amit Kapila wrote:\n> > On Wed, Mar 2, 2022 at 5:43 PM Tomas Vondra\n> > <tomas.vondra@enterprisedb.com> wrote:\n> >>\n> >> Attached is an updated patch, addressing most of the issues reported so\n> >> far. There are various minor tweaks, but the main changes are:\n> > ...\n> >>\n> >> 3) checks of column filter vs. publish_via_partition_root and replica\n> >> identity, following the same logic as the row-filter patch (hopefully,\n> >> it touches the same places, using the same logic, ...)\n> >>\n> >> That means - with \"publish_via_partition_root=false\" it's not allowed to\n> >> specify column filters on partitioned tables, only for leaf partitions.\n> >>\n> >> And we check column filter vs. replica identity when adding tables to\n> >> publications, or whenever we change the replica identity.\n> >>\n> >\n> > This handling is different from row filter work and I see problems\n> > with it.\n>\n> By different, I assume you mean I tried to enfoce the rules in ALTER\n> PUBLICATION and other ALTER commands, instead of when modifying the\n> data?\n>\n\nYes.\n\n> OK, I reworked this to do the same thing as the row filtering patch.\n>\n\nThanks, I'll check this.\n\n> > The column list validation w.r.t primary key (default replica\n> > identity) is missing. The handling of column list vs. 
partitions has\n> > multiple problems: (a) In attach partition, the patch is just checking\n> > ancestors for RI validation but what if the table being attached has\n> > further subpartitions; (b) I think the current locking also seems to\n> > have problems because it is quite possible that while it validates the\n> > ancestors here, concurrently someone changes the column list. I think\n> > it won't be enough to just change the locking mode because with the\n> > current patch strategy during attach, we will be first taking locks\n> > for child tables of current partition and then parent tables which can\n> > pose deadlock hazards.\n> > > The columns list validation also needs to be done when we change\n> > publication action.\n> >\n> I believe those issues should be solved by adopting the same approach as\n> the row-filtering patch, right?\n>\n\nRight.\n\n>\n> > Some other miscellaneous comments:\n> > =============================\n> > *\n> > In get_rel_sync_entry(), the handling for partitioned tables doesn't\n> > seem to be correct. 
It can publish a different set of columns based on\n> > the order of publications specified in the subscription.\n> >\n> > For example:\n> > ----\n> > create table parent (a int, b int, c int) partition by range (a);\n> > create table test_part1 (like parent);\n> > alter table parent attach partition test_part1 for values from (1) to (10);\n> >\n> > create publication pub for table parent(a) with (PUBLISH_VIA_PARTITION_ROOT);\n> > create publication pub2 for table test_part1(b);\n> > ---\n> >\n> > Now, depending on the order of publications in the list while defining\n> > subscription, the column list will change\n> > ----\n> > create subscription sub connection 'port=10000 dbname=postgres'\n> > publication pub, pub2;\n> >\n> > For the above, column list will be: (a)\n> >\n> > create subscription sub connection 'port=10000 dbname=postgres'\n> > publication pub2, pub;\n> >\n> > For this one, the column list will be: (a, b)\n> > ----\n> >\n> > To avoid this, the column list should be computed based on the final\n> > publish_as_relid as we are doing for the row filter.\n> >\n>\n> Hmm, yeah. That seems like a genuine problem - it should not depend on\n> the order of publications in the subscription, I guess.\n>\n> But is it an issue in the patch? Isn't that a pre-existing issue? AFAICS\n> the problem is that we initialize publish_as_relid=relid before the loop\n> over publications, and then just update it. So the first iteration\n> starts with relid, but the second iteration ends with whatever value is\n> set by the first iteration (e.g. the root).\n>\n> So with the example you posted, we start with\n>\n> publish_as_relid = relid = test_part1\n>\n> but then if the first publication is pubviaroot=true, we update it to\n> parent. 
And in the second iteration, we fail to find the column filter,\n> because \"parent\" (publish_as_relid) is not part of the pub2.\n>\n> If we do it in the other order, we leave the publish_as_relid value as\n> is (and find the filter), and then update it in the second iteration\n> (and find the column filter too).\n>\n> Now, this can be resolved by re-calculating the publish_as_relid from\n> scratch in each iteration (start with relid, then maybe update it). But\n> that's just half the story - the issue is there even without column\n> filters. Consider this example:\n>\n> create table t (a int, b int, c int) partition by range (a);\n>\n> create table t_1 partition of t for values from (1) to (10)\n> partition by range (a);\n>\n> create table t_2 partition of t_1 for values from (1) to (10);\n>\n> create publication pub1 for table t(a)\n> with (PUBLISH_VIA_PARTITION_ROOT);\n>\n> create publication pub2 for table t_1(a)\n> with (PUBLISH_VIA_PARTITION_ROOT);\n>\n>\n> Now, is you change subscribe to \"pub1, pub2\" and \"pub2, pub1\", we'll end\n> up with different publish_as_relid values (t or t_1). Which seems like\n> the same ambiguity issue.\n>\n\nI think we should fix this existing problem by always using the\ntop-most table as publish_as_relid. Basically, we can check, if the\nexisting publish_as_relid is an ancestor of a new rel that is going to\nreplace it then we shouldn't replace it. However, I think even if we\nfix the existing problem, we will still have the order problem for the\ncolumn filter patch, and to avoid that instead of fetching column\nfilters in the publication loop, we should use the final\npublish_as_relid. 
I think it will have another problem as well if we\ndon't use final publish_as_relid which is that sometimes when we\nshould not use any filter (say when pubviaroot is true and that\npublication has root partitioned table which has no filter) as per our\nrule of filters for a partitioned table, it can still use some filter\nfrom the non-root table.\n\n>\n> > *\n> > Fetching column filter info in tablesync.c is quite expensive. It\n> > seems to be using four round-trips to get the complete info whereas\n> > for row-filter we use just one round trip. I think we should try to\n> > get both row filter and column filter info in just one round trip.\n> >\n>\n> Maybe, but I really don't think this is an issue.\n>\n\nI am not sure but it might matter for small tables. Leaving aside the\nperformance issue, I think the current way will get the wrong column\nlist in many cases: (a) The ALL TABLES IN SCHEMA case handling won't\nwork for partitioned tables when the partitioned table is part of one\nschema and partition table is part of another schema. (b) The handling\nof partition tables in other cases will fetch incorrect lists as it\ntries to fetch the column list of all the partitions in the hierarchy.\n\nOne of my colleagues has even tested these cases both for column\nfilters and row filters and we find the behavior of row filter is okay\nwhereas for column filter it uses the wrong column list. We will share\nthe tests and results with you in a later email. We are trying to\nunify the column filter queries with row filter to make their behavior\nthe same and will share the findings once it is done. 
I hope if we are\nable to achieve this that we will reduce the chances of bugs in this\narea.\n\nNote: I think the first two patches for tests are not required after\ncommit ceb57afd3c.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 9 Mar 2022 15:33:40 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wednesday, March 9, 2022 6:04 PM Amit Kapila <amit.kapila16@gmail.com>\r\n> On Mon, Mar 7, 2022 at 8:48 PM Tomas Vondra\r\n> <tomas.vondra@enterprisedb.com> wrote:\r\n> >\r\n> > On 3/4/22 11:42, Amit Kapila wrote:\r\n> >\r\n> > > *\r\n> > > Fetching column filter info in tablesync.c is quite expensive. It\r\n> > > seems to be using four round-trips to get the complete info whereas\r\n> > > for row-filter we use just one round trip. I think we should try to\r\n> > > get both row filter and column filter info in just one round trip.\r\n> > >\r\n> >\r\n> > Maybe, but I really don't think this is an issue.\r\n> >\r\n> \r\n> I am not sure but it might matter for small tables. Leaving aside the\r\n> performance issue, I think the current way will get the wrong column list in\r\n> many cases: (a) The ALL TABLES IN SCHEMA case handling won't work for\r\n> partitioned tables when the partitioned table is part of one schema and\r\n> partition table is part of another schema. (b) The handling of partition tables in\r\n> other cases will fetch incorrect lists as it tries to fetch the column list of all the\r\n> partitions in the hierarchy.\r\n> \r\n> One of my colleagues has even tested these cases both for column filters and\r\n> row filters and we find the behavior of row filter is okay whereas for column\r\n> filter it uses the wrong column list. We will share the tests and results with you\r\n> in a later email. 
We are trying to unify the column filter queries with row filter to\r\n> make their behavior the same and will share the findings once it is done. I hope\r\n> if we are able to achieve this that we will reduce the chances of bugs in this area.\r\n> \r\n> Note: I think the first two patches for tests are not required after commit\r\n> ceb57afd3c.\r\n\r\nHi,\r\n\r\nHere are some tests and results about the table sync query of\r\ncolumn filter patch and row filter.\r\n\r\n1) multiple publications which publish schema of parent table and partition.\r\n----pub\r\ncreate schema s1;\r\ncreate table s1.t (a int, b int, c int) partition by range (a);\r\ncreate table t_1 partition of s1.t for values from (1) to (10);\r\ncreate publication pub1 for all tables in schema s1;\r\ncreate publication pub2 for table t_1(b);\r\n\r\n----sub\r\n- prepare tables\r\nCREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub1, pub2;\r\n\r\nWhen doing table sync for 't_1', the column list will be (b). I think it should\r\nbe no filter because table t_1 is also published via ALL TABLES IN SCHMEA\r\npublication.\r\n\r\nFor Row Filter, it will use no filter for this case.\r\n\r\n\r\n2) one publication publishes both parent and child\r\n----pub\r\ncreate table t (a int, b int, c int) partition by range (a);\r\ncreate table t_1 partition of t for values from (1) to (10)\r\n partition by range (a);\r\ncreate table t_2 partition of t_1 for values from (1) to (10);\r\n\r\ncreate publication pub2 for table t_1(a), t_2\r\n with (PUBLISH_VIA_PARTITION_ROOT);\r\n\r\n----sub\r\n- prepare tables\r\nCREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub2;\r\n\r\nWhen doing table sync for table 't_1', it has no column list. 
I think the\r\nexpected column list is (a).\r\n\r\nFor Row Filter, it will use the row filter of the top most parent table(t_1) in\r\nthis case.\r\n\r\n\r\n3) one publication publishes both parent and child\r\n----pub\r\ncreate table t (a int, b int, c int) partition by range (a);\r\ncreate table t_1 partition of t for values from (1) to (10)\r\n partition by range (a);\r\ncreate table t_2 partition of t_1 for values from (1) to (10);\r\n\r\ncreate publication pub2 for table t_1(a), t_2(b)\r\n with (PUBLISH_VIA_PARTITION_ROOT);\r\n\r\n----sub\r\n- prepare tables\r\nCREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub2;\r\n\r\nWhen doing table sync for table 't_1', the column list would be (a, b). I think\r\nthe expected column list is (a).\r\n\r\nFor Row Filter, it will use the row filter of the top most parent table(t_1) in\r\nthis case.\r\n\r\nBest regards,\r\nHou zj\r\n", "msg_date": "Wed, 9 Mar 2022 10:12:01 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/9/22 11:03, Amit Kapila wrote:\n> ...\n>> Hmm, yeah. That seems like a genuine problem - it should not depend on\n>> the order of publications in the subscription, I guess.\n>>\n>> But is it an issue in the patch? Isn't that a pre-existing issue? AFAICS\n>> the problem is that we initialize publish_as_relid=relid before the loop\n>> over publications, and then just update it. So the first iteration\n>> starts with relid, but the second iteration ends with whatever value is\n>> set by the first iteration (e.g. the root).\n>>\n>> So with the example you posted, we start with\n>>\n>> publish_as_relid = relid = test_part1\n>>\n>> but then if the first publication is pubviaroot=true, we update it to\n>> parent. 
And in the second iteration, we fail to find the column filter,\n>> because \"parent\" (publish_as_relid) is not part of the pub2.\n>>\n>> If we do it in the other order, we leave the publish_as_relid value as\n>> is (and find the filter), and then update it in the second iteration\n>> (and find the column filter too).\n>>\n>> Now, this can be resolved by re-calculating the publish_as_relid from\n>> scratch in each iteration (start with relid, then maybe update it). But\n>> that's just half the story - the issue is there even without column\n>> filters. Consider this example:\n>>\n>> create table t (a int, b int, c int) partition by range (a);\n>>\n>> create table t_1 partition of t for values from (1) to (10)\n>> partition by range (a);\n>>\n>> create table t_2 partition of t_1 for values from (1) to (10);\n>>\n>> create publication pub1 for table t(a)\n>> with (PUBLISH_VIA_PARTITION_ROOT);\n>>\n>> create publication pub2 for table t_1(a)\n>> with (PUBLISH_VIA_PARTITION_ROOT);\n>>\n>>\n>> Now, is you change subscribe to \"pub1, pub2\" and \"pub2, pub1\", we'll end\n>> up with different publish_as_relid values (t or t_1). Which seems like\n>> the same ambiguity issue.\n>>\n> \n> I think we should fix this existing problem by always using the\n> top-most table as publish_as_relid. Basically, we can check, if the\n> existing publish_as_relid is an ancestor of a new rel that is going to\n> replace it then we shouldn't replace it.\n\nRight, using the topmost relid from all publications seems like the\ncorrect solution.\n\n> However, I think even if we\n> fix the existing problem, we will still have the order problem for the\n> column filter patch, and to avoid that instead of fetching column\n> filters in the publication loop, we should use the final\n> publish_as_relid. 
I think it will have another problem as well if we\n> don't use final publish_as_relid which is that sometimes when we\n> should not use any filter (say when pubviaroot is true and that\n> publication has root partitioned table which has no filter) as per our\n> rule of filters for a partitioned table, it can still use some filter\n> from the non-root table.\n> \n\nYeah, the current behavior is just a consequence of how we determine\npublish_as_relid now. If we rework that, we should first determine the\nrelid and then fetch the filter only for that single rel.\n\n>>\n>>> *\n>>> Fetching column filter info in tablesync.c is quite expensive. It\n>>> seems to be using four round-trips to get the complete info whereas\n>>> for row-filter we use just one round trip. I think we should try to\n>>> get both row filter and column filter info in just one round trip.\n>>>\n>>\n>> Maybe, but I really don't think this is an issue.\n>>\n> \n> I am not sure but it might matter for small tables. Leaving aside the\n> performance issue, I think the current way will get the wrong column\n> list in many cases: (a) The ALL TABLES IN SCHEMA case handling won't\n> work for partitioned tables when the partitioned table is part of one\n> schema and partition table is part of another schema. (b) The handling\n> of partition tables in other cases will fetch incorrect lists as it\n> tries to fetch the column list of all the partitions in the hierarchy.\n> \n> One of my colleagues has even tested these cases both for column\n> filters and row filters and we find the behavior of row filter is okay\n> whereas for column filter it uses the wrong column list. We will share\n> the tests and results with you in a later email. We are trying to\n> unify the column filter queries with row filter to make their behavior\n> the same and will share the findings once it is done. 
I hope if we are\n> able to achieve this that we will reduce the chances of bugs in this\n> area.\n> \n\nOK, I'll take a look at that email.\n\n> Note: I think the first two patches for tests are not required after\n> commit ceb57afd3c.\n> \n\nRight. Will remove.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Wed, 9 Mar 2022 14:53:12 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Mar 9, 2022 at 3:33 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Mon, Mar 7, 2022 at 8:48 PM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>\n> > OK, I reworked this to do the same thing as the row filtering patch.\n> >\n>\n> Thanks, I'll check this.\n>\n\nSome assorted comments:\n=====================\n1. We don't need to send a column list for the old tuple in case of an\nupdate (similar to delete). It is not required to apply a column\nfilter for those cases because we ensure that RI must be part of the\ncolumn list for updates and deletes.\n2.\n+ /*\n+ * Check if all columns referenced in the column filter are part of\n+ * the REPLICA IDENTITY index or not.\n\nI think this comment is reverse. The rule we follow here is that\nattributes that are part of RI must be there in a specified column\nlist. This is used at two places in the patch.\n3. get_rel_sync_entry()\n{\n/* XXX is there a danger of memory leak here? beware */\n+ oldctx = MemoryContextSwitchTo(CacheMemoryContext);\n+ for (int i = 0; i < nelems; i++)\n...\n}\n\nSimilar to the row filter, I think we need to use\nentry->cache_expr_cxt to allocate this. There are other usages of\nCacheMemoryContext in this part of the code but I think those need to\nbe also changed and we can do that as a separate patch. If we do the\nsuggested change then we don't need to separately free columns.\n4. 
I think we don't need the DDL changes in AtExecDropColumn. Instead,\nwe can change the dependency of columns to NORMAL during publication\ncommands.\n5. There is a reference to check_publication_columns but that function\nis removed from the patch.\n6.\n/*\n* If we know everything is replicated and the row filter is invalid\n* for update and delete, there is no point to check for other\n* publications.\n*/\nif (pubdesc->pubactions.pubinsert && pubdesc->pubactions.pubupdate &&\npubdesc->pubactions.pubdelete && pubdesc->pubactions.pubtruncate &&\n!pubdesc->rf_valid_for_update && !pubdesc->rf_valid_for_delete)\nbreak;\n\n/*\n* If we know everything is replicated and the column filter is invalid\n* for update and delete, there is no point to check for other\n* publications.\n*/\nif (pubdesc->pubactions.pubinsert && pubdesc->pubactions.pubupdate &&\npubdesc->pubactions.pubdelete && pubdesc->pubactions.pubtruncate &&\n!pubdesc->cf_valid_for_update && !pubdesc->cf_valid_for_delete)\nbreak;\n\nCan we combine these two checks?\n\nI feel this patch needs a more thorough review.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 10 Mar 2022 08:39:46 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/9/22 11:12, houzj.fnst@fujitsu.com wrote:\n> Hi,\n> \n> Here are some tests and results about the table sync query of\n> column filter patch and row filter.\n> \n> 1) multiple publications which publish schema of parent table and partition.\n> ----pub\n> create schema s1;\n> create table s1.t (a int, b int, c int) partition by range (a);\n> create table t_1 partition of s1.t for values from (1) to (10);\n> create publication pub1 for all tables in schema s1;\n> create publication pub2 for table t_1(b);\n> \n> ----sub\n> - prepare tables\n> CREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub1, pub2;\n> \n> When doing table 
sync for 't_1', the column list will be (b). I think it should\n> be no filter because table t_1 is also published via ALL TABLES IN SCHMEA\n> publication.\n> \n> For Row Filter, it will use no filter for this case.\n> \n> \n> 2) one publication publishes both parent and child\n> ----pub\n> create table t (a int, b int, c int) partition by range (a);\n> create table t_1 partition of t for values from (1) to (10)\n> partition by range (a);\n> create table t_2 partition of t_1 for values from (1) to (10);\n> \n> create publication pub2 for table t_1(a), t_2\n> with (PUBLISH_VIA_PARTITION_ROOT);\n> \n> ----sub\n> - prepare tables\n> CREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub2;\n> \n> When doing table sync for table 't_1', it has no column list. I think the\n> expected column list is (a).\n> \n> For Row Filter, it will use the row filter of the top most parent table(t_1) in\n> this case.\n> \n> \n> 3) one publication publishes both parent and child\n> ----pub\n> create table t (a int, b int, c int) partition by range (a);\n> create table t_1 partition of t for values from (1) to (10)\n> partition by range (a);\n> create table t_2 partition of t_1 for values from (1) to (10);\n> \n> create publication pub2 for table t_1(a), t_2(b)\n> with (PUBLISH_VIA_PARTITION_ROOT);\n> \n> ----sub\n> - prepare tables\n> CREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub2;\n> \n> When doing table sync for table 't_1', the column list would be (a, b). I think\n> the expected column list is (a).\n> \n> For Row Filter, it will use the row filter of the top most parent table(t_1) in\n> this case.\n> \n\nAttached is an updated patch version, addressing all of those issues.\n\n0001 is a bugfix, reworking how we calculate publish_as_relid. The old\napproach was unstable with multiple publications, giving different\nresults depending on order of the publications. 
This should be\nbackpatched into PG13 where publish_via_partition_root was introduced, I\nthink.\n\n0002 is the main patch, merging the changes proposed by Peter and fixing\nthe issues reported here. In most cases this means adopting the code\nused for row filters, and perhaps simplifying it a bit.\n\n\nBut I also tried to implement a row-filter test for 0001, and I'm not\nsure I understand the behavior I observe. Consider this:\n\n-- a chain of 3 partitions (on both publisher and subscriber)\nCREATE TABLE test_part_rf (a int primary key, b int, c int)\n PARTITION BY LIST (a);\n\nCREATE TABLE test_part_rf_1\n PARTITION OF test_part_rf FOR VALUES IN (1,2,3,4,5)\n PARTITION BY LIST (a);\n\nCREATE TABLE test_part_rf_2\n PARTITION OF test_part_rf_1 FOR VALUES IN (1,2,3,4,5);\n\n-- initial data\nINSERT INTO test_part_rf VALUES (1, 5, 100);\nINSERT INTO test_part_rf VALUES (2, 15, 200);\n\n-- two publications, each adding a different partition\nCREATE PUBLICATION test_pub_part_1 FOR TABLE test_part_rf_1\n WHERE (b < 10) WITH (publish_via_partition_root);\n\nCREATE PUBLICATION test_pub_part_2 FOR TABLE test_part_rf_2\n WHERE (b > 10) WITH (publish_via_partition_root);\n\n-- now create the subscription (also try opposite ordering)\nCREATE SUBSCRIPTION test_part_sub CONNECTION '...'\n PUBLICATION test_pub_part_1, test_pub_part_2;\n\n-- wait for sync\n\n-- inert some more data\nINSERT INTO test_part_rf VALUES (3, 6, 300);\nINSERT INTO test_part_rf VALUES (4, 16, 400);\n\n-- wait for catchup\n\nNow, based on the discussion here, my expectation is that we'll use the\nrow filter from the top-most ancestor in any publication, which in this\ncase is test_part_rf_1. Hence the filter should be (b < 10).\n\nSo I'd expect these rows to be replicated:\n\n1,5,100\n3,6,300\n\nBut that's not what I get, unfortunately. 
I get different results,\ndepending on the order of publications:\n\n1) test_pub_part_1, test_pub_part_2\n\n1|5|100\n2|15|200\n3|6|300\n4|16|400\n\n2) test_pub_part_2, test_pub_part_1\n\n3|6|300\n4|16|400\n\nThat seems pretty bizarre, because it either means we're not enforcing\nany filter or some strange combination of filters (notice that for (2)\nwe skip/replicate rows matching either filter).\n\nI have to be missing something important, but this seems confusing.\nThere's a patch adding a simple test case to 028_row_filter.sql (named\n.txt, so as not to confuse cfbot).\n\n\nFWIW I'm not convinced applying just the filters (both row and column)\nis the right approach. It might be OK for a single publication, but with\nmultiple publications not so much. If you list multiple publications for\na subscription, it seems natural to expect a union of all the data, a\nbit as if there were multiple subscriptions. But what you actually get\nis some subset, depending on what other relations the other publications\ninclude.\n\nOf course, this only happens if the publications include different\nancestors. If all include the same ancestor, everything works fine and\nyou get the \"union\" of data.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Thu, 10 Mar 2022 19:17:15 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/9/22 10:20, Peter Eisentraut wrote:\n> \n> On 07.03.22 16:18, Tomas Vondra wrote:\n>> AFAICS these issues should be resolved by the adoption of the row-filter\n>> approach (i.e. 
it should fail the same way as for row filter).\n> \n> The first two patches (additional testing for row filtering feature)\n> look okay to me.\n> \n> Attached is a fixup patch for your main feature patch (the third one).\n> \n> It's a bit of code and documentation cleanup, but mainly I removed the\n> term \"column filter\" from the patch.  Half the code was using \"column\n> list\" or similar and half the code \"column filter\", which was confusing.\n>  Also, there seemed to be a bit of copy-and-pasting from row-filter code\n> going on, with some code comments not quite sensible, so I rewrote some\n> of them.  Also some code used \"rf\" and \"cf\" symbols which were a bit\n> hard to tell apart.  A few more letters can increase readability.\n> \n> Note in publicationcmds.c OpenTableList() the wrong if condition was used.\n> \n\nThanks, I've merged these changes into the patch.\n\n> I'm still confused about the intended replica identity handling.  This\n> patch still checks whether the column list contains the replica identity\n> at DDL time.  And then it also checks at execution time.  I thought the\n> latest understanding was that the DDL-time checking would be removed.  I\n> think it's basically useless now, since as the test cases show, you can\n> subvert those checks by altering the replica identity later.\n\nAre you sure? Which part of the patch does that? 
AFAICS we only do those\nchecks in CheckCmdReplicaIdentity now, but maybe I'm missing something.\nAre you sure you're not looking at some older patch version?\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Thu, 10 Mar 2022 19:20:10 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/10/22 19:17, Tomas Vondra wrote:\n> On 3/9/22 11:12, houzj.fnst@fujitsu.com wrote:\n>> Hi,\n>>\n>> Here are some tests and results about the table sync query of\n>> column filter patch and row filter.\n>>\n>> 1) multiple publications which publish schema of parent table and partition.\n>> ----pub\n>> create schema s1;\n>> create table s1.t (a int, b int, c int) partition by range (a);\n>> create table t_1 partition of s1.t for values from (1) to (10);\n>> create publication pub1 for all tables in schema s1;\n>> create publication pub2 for table t_1(b);\n>>\n>> ----sub\n>> - prepare tables\n>> CREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub1, pub2;\n>>\n>> When doing table sync for 't_1', the column list will be (b). 
I think it should\n>> be no filter because table t_1 is also published via ALL TABLES IN SCHMEA\n>> publication.\n>>\n>> For Row Filter, it will use no filter for this case.\n>>\n>>\n>> 2) one publication publishes both parent and child\n>> ----pub\n>> create table t (a int, b int, c int) partition by range (a);\n>> create table t_1 partition of t for values from (1) to (10)\n>> partition by range (a);\n>> create table t_2 partition of t_1 for values from (1) to (10);\n>>\n>> create publication pub2 for table t_1(a), t_2\n>> with (PUBLISH_VIA_PARTITION_ROOT);\n>>\n>> ----sub\n>> - prepare tables\n>> CREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub2;\n>>\n>> When doing table sync for table 't_1', it has no column list. I think the\n>> expected column list is (a).\n>>\n>> For Row Filter, it will use the row filter of the top most parent table(t_1) in\n>> this case.\n>>\n>>\n>> 3) one publication publishes both parent and child\n>> ----pub\n>> create table t (a int, b int, c int) partition by range (a);\n>> create table t_1 partition of t for values from (1) to (10)\n>> partition by range (a);\n>> create table t_2 partition of t_1 for values from (1) to (10);\n>>\n>> create publication pub2 for table t_1(a), t_2(b)\n>> with (PUBLISH_VIA_PARTITION_ROOT);\n>>\n>> ----sub\n>> - prepare tables\n>> CREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub2;\n>>\n>> When doing table sync for table 't_1', the column list would be (a, b). I think\n>> the expected column list is (a).\n>>\n>> For Row Filter, it will use the row filter of the top most parent table(t_1) in\n>> this case.\n>>\n> \n> Attached is an updated patch version, addressing all of those issues.\n> \n> 0001 is a bugfix, reworking how we calculate publish_as_relid. The old\n> approach was unstable with multiple publications, giving different\n> results depending on order of the publications. 
This should be\n> backpatched into PG13 where publish_via_partition_root was introduced, I\n> think.\n> \n> 0002 is the main patch, merging the changes proposed by Peter and fixing\n> the issues reported here. In most cases this means adopting the code\n> used for row filters, and perhaps simplifying it a bit.\n> \n> \n> But I also tried to implement a row-filter test for 0001, and I'm not\n> sure I understand the behavior I observe. Consider this:\n> \n> -- a chain of 3 partitions (on both publisher and subscriber)\n> CREATE TABLE test_part_rf (a int primary key, b int, c int)\n> PARTITION BY LIST (a);\n> \n> CREATE TABLE test_part_rf_1\n> PARTITION OF test_part_rf FOR VALUES IN (1,2,3,4,5)\n> PARTITION BY LIST (a);\n> \n> CREATE TABLE test_part_rf_2\n> PARTITION OF test_part_rf_1 FOR VALUES IN (1,2,3,4,5);\n> \n> -- initial data\n> INSERT INTO test_part_rf VALUES (1, 5, 100);\n> INSERT INTO test_part_rf VALUES (2, 15, 200);\n> \n> -- two publications, each adding a different partition\n> CREATE PUBLICATION test_pub_part_1 FOR TABLE test_part_rf_1\n> WHERE (b < 10) WITH (publish_via_partition_root);\n> \n> CREATE PUBLICATION test_pub_part_2 FOR TABLE test_part_rf_2\n> WHERE (b > 10) WITH (publish_via_partition_root);\n> \n> -- now create the subscription (also try opposite ordering)\n> CREATE SUBSCRIPTION test_part_sub CONNECTION '...'\n> PUBLICATION test_pub_part_1, test_pub_part_2;\n> \n> -- wait for sync\n> \n> -- inert some more data\n> INSERT INTO test_part_rf VALUES (3, 6, 300);\n> INSERT INTO test_part_rf VALUES (4, 16, 400);\n> \n> -- wait for catchup\n> \n> Now, based on the discussion here, my expectation is that we'll use the\n> row filter from the top-most ancestor in any publication, which in this\n> case is test_part_rf_1. Hence the filter should be (b < 10).\n> \n> So I'd expect these rows to be replicated:\n> \n> 1,5,100\n> 3,6,300\n> \n> But that's not what I get, unfortunately. 
I get different results,\n> depending on the order of publications:\n> \n> 1) test_pub_part_1, test_pub_part_2\n> \n> 1|5|100\n> 2|15|200\n> 3|6|300\n> 4|16|400\n> \n> 2) test_pub_part_2, test_pub_part_1\n> \n> 3|6|300\n> 4|16|400\n> \n> That seems pretty bizarre, because it either means we're not enforcing\n> any filter or some strange combination of filters (notice that for (2)\n> we skip/replicate rows matching either filter).\n> \n> I have to be missing something important, but this seems confusing.\n> There's a patch adding a simple test case to 028_row_filter.sql (named\n> .txt, so as not to confuse cfbot).\n> \n\nFWIW I think the reason is pretty simple - pgoutput_row_filter_init is\nbroken. It assumes you can just do this\n\nrftuple = SearchSysCache2(PUBLICATIONRELMAP,\n ObjectIdGetDatum(entry->publish_as_relid),\n ObjectIdGetDatum(pub->oid));\n\nif (HeapTupleIsValid(rftuple))\n{\n /* Null indicates no filter. */\n rfdatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple,\n Anum_pg_publication_rel_prqual,\n &pub_no_filter);\n}\nelse\n{\n pub_no_filter = true;\n}\n\n\nand pub_no_filter=true means there's no filter at all. 
Which is\nnonsense, because we're using publish_as_relid here - the publication\nmay not include this particular ancestor, in which case we need to just\nignore this publication.\n\nSo yeah, this needs to be reworked.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Thu, 10 Mar 2022 20:10:01 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/10/22 04:09, Amit Kapila wrote:\n> On Wed, Mar 9, 2022 at 3:33 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>\n>> On Mon, Mar 7, 2022 at 8:48 PM Tomas Vondra\n>> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>>> OK, I reworked this to do the same thing as the row filtering patch.\n>>>\n>>\n>> Thanks, I'll check this.\n>>\n> \n> Some assorted comments:\n> =====================\n> 1. We don't need to send a column list for the old tuple in case of an\n> update (similar to delete). It is not required to apply a column\n> filter for those cases because we ensure that RI must be part of the\n> column list for updates and deletes.\n\nI'm not sure which part of the code does this refer to?\n\n> 2.\n> + /*\n> + * Check if all columns referenced in the column filter are part of\n> + * the REPLICA IDENTITY index or not.\n> \n> I think this comment is reverse. The rule we follow here is that\n> attributes that are part of RI must be there in a specified column\n> list. This is used at two places in the patch.\n\nYeah, you're right. Will fix.\n\n> 3. get_rel_sync_entry()\n> {\n> /* XXX is there a danger of memory leak here? beware */\n> + oldctx = MemoryContextSwitchTo(CacheMemoryContext);\n> + for (int i = 0; i < nelems; i++)\n> ...\n> }\n> \n> Similar to the row filter, I think we need to use\n> entry->cache_expr_cxt to allocate this. 
There are other usages of\n> CacheMemoryContext in this part of the code but I think those need to\n> be also changed and we can do that as a separate patch. If we do the\n> suggested change then we don't need to separately free columns.\n\nI agree a shorter-lived context would be better than CacheMemoryContext,\nbut \"expr\" seems to indicate it's for the expression only, so maybe we\nshould rename that. But do we really want a memory context for every\nsingle entry?\n\n> 4. I think we don't need the DDL changes in AtExecDropColumn. Instead,\n> we can change the dependency of columns to NORMAL during publication\n> commands.\n\nI'll think about that.\n\n> 5. There is a reference to check_publication_columns but that function\n> is removed from the patch.\n\nRight, will fix.\n\n> 6.\n> /*\n> * If we know everything is replicated and the row filter is invalid\n> * for update and delete, there is no point to check for other\n> * publications.\n> */\n> if (pubdesc->pubactions.pubinsert && pubdesc->pubactions.pubupdate &&\n> pubdesc->pubactions.pubdelete && pubdesc->pubactions.pubtruncate &&\n> !pubdesc->rf_valid_for_update && !pubdesc->rf_valid_for_delete)\n> break;\n> \n> /*\n> * If we know everything is replicated and the column filter is invalid\n> * for update and delete, there is no point to check for other\n> * publications.\n> */\n> if (pubdesc->pubactions.pubinsert && pubdesc->pubactions.pubupdate &&\n> pubdesc->pubactions.pubdelete && pubdesc->pubactions.pubtruncate &&\n> !pubdesc->cf_valid_for_update && !pubdesc->cf_valid_for_delete)\n> break;\n> \n> Can we combine these two checks?\n> \n\nI was worried it'd get too complex / hard to understand, but I'll think\nabout maybe simplifying the conditions a bit.\n\n> I feel this patch needs a more thorough review.\n> \n\nI won't object to more review, of course.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Thu, 10 Mar 2022 
20:14:37 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/10/22 20:10, Tomas Vondra wrote:\n> \n> \n> On 3/10/22 19:17, Tomas Vondra wrote:\n>> On 3/9/22 11:12, houzj.fnst@fujitsu.com wrote:\n>>> Hi,\n>>>\n>>> Here are some tests and results about the table sync query of\n>>> column filter patch and row filter.\n>>>\n>>> 1) multiple publications which publish schema of parent table and partition.\n>>> ----pub\n>>> create schema s1;\n>>> create table s1.t (a int, b int, c int) partition by range (a);\n>>> create table t_1 partition of s1.t for values from (1) to (10);\n>>> create publication pub1 for all tables in schema s1;\n>>> create publication pub2 for table t_1(b);\n>>>\n>>> ----sub\n>>> - prepare tables\n>>> CREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub1, pub2;\n>>>\n>>> When doing table sync for 't_1', the column list will be (b). I think it should\n>>> be no filter because table t_1 is also published via ALL TABLES IN SCHMEA\n>>> publication.\n>>>\n>>> For Row Filter, it will use no filter for this case.\n>>>\n>>>\n>>> 2) one publication publishes both parent and child\n>>> ----pub\n>>> create table t (a int, b int, c int) partition by range (a);\n>>> create table t_1 partition of t for values from (1) to (10)\n>>> partition by range (a);\n>>> create table t_2 partition of t_1 for values from (1) to (10);\n>>>\n>>> create publication pub2 for table t_1(a), t_2\n>>> with (PUBLISH_VIA_PARTITION_ROOT);\n>>>\n>>> ----sub\n>>> - prepare tables\n>>> CREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub2;\n>>>\n>>> When doing table sync for table 't_1', it has no column list. 
I think the\n>>> expected column list is (a).\n>>>\n>>> For Row Filter, it will use the row filter of the top most parent table(t_1) in\n>>> this case.\n>>>\n>>>\n>>> 3) one publication publishes both parent and child\n>>> ----pub\n>>> create table t (a int, b int, c int) partition by range (a);\n>>> create table t_1 partition of t for values from (1) to (10)\n>>> partition by range (a);\n>>> create table t_2 partition of t_1 for values from (1) to (10);\n>>>\n>>> create publication pub2 for table t_1(a), t_2(b)\n>>> with (PUBLISH_VIA_PARTITION_ROOT);\n>>>\n>>> ----sub\n>>> - prepare tables\n>>> CREATE SUBSCRIPTION sub CONNECTION 'port=10000 dbname=postgres' PUBLICATION pub2;\n>>>\n>>> When doing table sync for table 't_1', the column list would be (a, b). I think\n>>> the expected column list is (a).\n>>>\n>>> For Row Filter, it will use the row filter of the top most parent table(t_1) in\n>>> this case.\n>>>\n>>\n>> Attached is an updated patch version, addressing all of those issues.\n>>\n>> 0001 is a bugfix, reworking how we calculate publish_as_relid. The old\n>> approach was unstable with multiple publications, giving different\n>> results depending on order of the publications. This should be\n>> backpatched into PG13 where publish_via_partition_root was introduced, I\n>> think.\n>>\n>> 0002 is the main patch, merging the changes proposed by Peter and fixing\n>> the issues reported here. In most cases this means adopting the code\n>> used for row filters, and perhaps simplifying it a bit.\n>>\n>>\n>> But I also tried to implement a row-filter test for 0001, and I'm not\n>> sure I understand the behavior I observe. 
Consider this:\n>>\n>> -- a chain of 3 partitions (on both publisher and subscriber)\n>> CREATE TABLE test_part_rf (a int primary key, b int, c int)\n>> PARTITION BY LIST (a);\n>>\n>> CREATE TABLE test_part_rf_1\n>> PARTITION OF test_part_rf FOR VALUES IN (1,2,3,4,5)\n>> PARTITION BY LIST (a);\n>>\n>> CREATE TABLE test_part_rf_2\n>> PARTITION OF test_part_rf_1 FOR VALUES IN (1,2,3,4,5);\n>>\n>> -- initial data\n>> INSERT INTO test_part_rf VALUES (1, 5, 100);\n>> INSERT INTO test_part_rf VALUES (2, 15, 200);\n>>\n>> -- two publications, each adding a different partition\n>> CREATE PUBLICATION test_pub_part_1 FOR TABLE test_part_rf_1\n>> WHERE (b < 10) WITH (publish_via_partition_root);\n>>\n>> CREATE PUBLICATION test_pub_part_2 FOR TABLE test_part_rf_2\n>> WHERE (b > 10) WITH (publish_via_partition_root);\n>>\n>> -- now create the subscription (also try opposite ordering)\n>> CREATE SUBSCRIPTION test_part_sub CONNECTION '...'\n>> PUBLICATION test_pub_part_1, test_pub_part_2;\n>>\n>> -- wait for sync\n>>\n>> -- inert some more data\n>> INSERT INTO test_part_rf VALUES (3, 6, 300);\n>> INSERT INTO test_part_rf VALUES (4, 16, 400);\n>>\n>> -- wait for catchup\n>>\n>> Now, based on the discussion here, my expectation is that we'll use the\n>> row filter from the top-most ancestor in any publication, which in this\n>> case is test_part_rf_1. Hence the filter should be (b < 10).\n>>\n>> So I'd expect these rows to be replicated:\n>>\n>> 1,5,100\n>> 3,6,300\n>>\n>> But that's not what I get, unfortunately. 
I get different results,\n>> depending on the order of publications:\n>>\n>> 1) test_pub_part_1, test_pub_part_2\n>>\n>> 1|5|100\n>> 2|15|200\n>> 3|6|300\n>> 4|16|400\n>>\n>> 2) test_pub_part_2, test_pub_part_1\n>>\n>> 3|6|300\n>> 4|16|400\n>>\n>> That seems pretty bizarre, because it either means we're not enforcing\n>> any filter or some strange combination of filters (notice that for (2)\n>> we skip/replicate rows matching either filter).\n>>\n>> I have to be missing something important, but this seems confusing.\n>> There's a patch adding a simple test case to 028_row_filter.sql (named\n>> .txt, so as not to confuse cfbot).\n>>\n> \n> FWIW I think the reason is pretty simple - pgoutput_row_filter_init is\n> broken. It assumes you can just do this\n> \n> rftuple = SearchSysCache2(PUBLICATIONRELMAP,\n> ObjectIdGetDatum(entry->publish_as_relid),\n> ObjectIdGetDatum(pub->oid));\n> \n> if (HeapTupleIsValid(rftuple))\n> {\n> /* Null indicates no filter. */\n> rfdatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple,\n> Anum_pg_publication_rel_prqual,\n> &pub_no_filter);\n> }\n> else\n> {\n> pub_no_filter = true;\n> }\n> \n> \n> and pub_no_filter=true means there's no filter at all. Which is\n> nonsense, because we're using publish_as_relid here - the publication\n> may not include this particular ancestor, in which case we need to just\n> ignore this publication.\n> \n> So yeah, this needs to be reworked.\n> \n\nI spent a bit of time looking at this, and I think a minor change in\nget_rel_sync_entry() fixes this - it's enough to ensure rel_publications\nonly includes publications that actually include publish_as_relid.\n\nBut this does not address tablesync.c :-( That still copies everything,\nbecause it decides to sync both rels (test_pub_part_1, test_pub_part_2),\nwith it's row filter. 
On older releases this would fail, because we'd\nstart two workers:\n\n1) COPY public.test_part_rf_2 TO STDOUT\n\n2) COPY (SELECT a, b, c FROM public.test_part_rf_1) TO STDOUT\n\nAnd that ends up inserting date from test_part_rf_2 twice. But now we\nend up doing this instead:\n\n1) COPY (SELECT a, b, c FROM public.test_part_rf_1 WHERE (b < 10)) TO STDOUT\n\n2) COPY (SELECT a, b, c FROM ONLY public.test_part_rf_2 WHERE (b > 10))\nTO STDOUT\n\nWhich no longer conflicts, because those subsets are mutually exclusive\n(due to how the filter is defined), so the sync succeeds.\n\nBut I find this really weird - I think it's reasonable to expect the\nsync to produce the same result as if the data was inserted and\nreplicated, and this just violates that.\n\nShouldn't tablesync calculate a list of relations in a way that prevents\nsuch duplicate / overlapping syncs? In any case, this sync issue looks\nentirely unrelated to the column filtering patch.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Fri, 11 Mar 2022 02:56:41 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Mar 11, 2022 at 12:44 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/10/22 04:09, Amit Kapila wrote:\n> > On Wed, Mar 9, 2022 at 3:33 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >>\n> >> On Mon, Mar 7, 2022 at 8:48 PM Tomas Vondra\n> >> <tomas.vondra@enterprisedb.com> wrote:\n> >>\n> >>> OK, I reworked this to do the same thing as the row filtering patch.\n> >>>\n> >>\n> >> Thanks, I'll check this.\n> >>\n> >\n> > Some assorted comments:\n> > =====================\n> > 1. We don't need to send a column list for the old tuple in case of an\n> > update (similar to delete). 
It is not required to apply a column\n> > filter for those cases because we ensure that RI must be part of the\n> > column list for updates and deletes.\n>\n> I'm not sure which part of the code does this refer to?\n>\n\nThe below part:\n@@ -464,11 +473,11 @@ logicalrep_write_update(StringInfo out,\nTransactionId xid, Relation rel,\n pq_sendbyte(out, 'O'); /* old tuple follows */\n else\n pq_sendbyte(out, 'K'); /* old key follows */\n- logicalrep_write_tuple(out, rel, oldslot, binary);\n+ logicalrep_write_tuple(out, rel, oldslot, binary, columns);\n }\n\nI think here instead of columns, the patch needs to send NULL as it is\nalready doing in logicalrep_write_delete.\n\n> > 2.\n> > + /*\n> > + * Check if all columns referenced in the column filter are part of\n> > + * the REPLICA IDENTITY index or not.\n> >\n> > I think this comment is reverse. The rule we follow here is that\n> > attributes that are part of RI must be there in a specified column\n> > list. This is used at two places in the patch.\n>\n> Yeah, you're right. Will fix.\n>\n> > 3. get_rel_sync_entry()\n> > {\n> > /* XXX is there a danger of memory leak here? beware */\n> > + oldctx = MemoryContextSwitchTo(CacheMemoryContext);\n> > + for (int i = 0; i < nelems; i++)\n> > ...\n> > }\n> >\n> > Similar to the row filter, I think we need to use\n> > entry->cache_expr_cxt to allocate this. There are other usages of\n> > CacheMemoryContext in this part of the code but I think those need to\n> > be also changed and we can do that as a separate patch. If we do the\n> > suggested change then we don't need to separately free columns.\n>\n> I agree a shorter-lived context would be better than CacheMemoryContext,\n> but \"expr\" seems to indicate it's for the expression only, so maybe we\n> should rename that.\n>\n\nYeah, we can do that. 
How about rel_entry_cxt or something like that?\nThe idea is that eventually, we should move a few other things of\nRelSyncEntry like attrmap where we are using CacheMemoryContext under\nthis context.\n\n> But do we really want a memory context for every\n> single entry?\n>\n\nAny other better idea?\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 11 Mar 2022 08:16:57 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Mar 11, 2022 at 7:26 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> But this does not address tablesync.c :-( That still copies everything,\n> because it decides to sync both rels (test_pub_part_1, test_pub_part_2),\n> with it's row filter. On older releases this would fail, because we'd\n> start two workers:\n>\n\nYeah, this is because of the existing problem where we sync both rels\ninstead of one. We have fixed some similar existing problems earlier.\nHou-San has reported a similar case in another email [1].\n\n>\n> But I find this really weird - I think it's reasonable to expect the\n> sync to produce the same result as if the data was inserted and\n> replicated, and this just violates that.\n>\n> Shouldn't tablesync calculate a list of relations in a way that prevents\n> such duplicate / overlapping syncs?\n>\n\nYes, I think it is better to fix it separately than to fix it along\nwith row filter or column filter work.\n\n>\nIn any case, this sync issue looks\n> entirely unrelated to the column filtering patch.\n>\n\nRight.\n\n[1] - https://www.postgresql.org/message-id/OS0PR01MB5716DC2982CC735FDE388804940B9%40OS0PR01MB5716.jpnprd01.prod.outlook.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 11 Mar 2022 09:00:31 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 
3/11/22 03:46, Amit Kapila wrote:\n> On Fri, Mar 11, 2022 at 12:44 AM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> On 3/10/22 04:09, Amit Kapila wrote:\n>>> On Wed, Mar 9, 2022 at 3:33 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>>>\n>>>> On Mon, Mar 7, 2022 at 8:48 PM Tomas Vondra\n>>>> <tomas.vondra@enterprisedb.com> wrote:\n>>>>\n>>>>> OK, I reworked this to do the same thing as the row filtering patch.\n>>>>>\n>>>>\n>>>> Thanks, I'll check this.\n>>>>\n>>>\n>>> Some assorted comments:\n>>> =====================\n>>> 1. We don't need to send a column list for the old tuple in case of an\n>>> update (similar to delete). It is not required to apply a column\n>>> filter for those cases because we ensure that RI must be part of the\n>>> column list for updates and deletes.\n>>\n>> I'm not sure which part of the code does this refer to?\n>>\n> \n> The below part:\n> @@ -464,11 +473,11 @@ logicalrep_write_update(StringInfo out,\n> TransactionId xid, Relation rel,\n> pq_sendbyte(out, 'O'); /* old tuple follows */\n> else\n> pq_sendbyte(out, 'K'); /* old key follows */\n> - logicalrep_write_tuple(out, rel, oldslot, binary);\n> + logicalrep_write_tuple(out, rel, oldslot, binary, columns);\n> }\n> \n> I think here instead of columns, the patch needs to send NULL as it is\n> already doing in logicalrep_write_delete.\n> \n\nHmmm, yeah. In practice it doesn't really matter, because NULL means\n\"send all columns\" so it actually relaxes the check. But we only send\nthe RI keys, which is a subset of the column filter. But will fix.\n\n>>> 2.\n>>> + /*\n>>> + * Check if all columns referenced in the column filter are part of\n>>> + * the REPLICA IDENTITY index or not.\n>>>\n>>> I think this comment is reverse. The rule we follow here is that\n>>> attributes that are part of RI must be there in a specified column\n>>> list. This is used at two places in the patch.\n>>\n>> Yeah, you're right. Will fix.\n>>\n>>> 3. 
get_rel_sync_entry()\n>>> {\n>>> /* XXX is there a danger of memory leak here? beware */\n>>> + oldctx = MemoryContextSwitchTo(CacheMemoryContext);\n>>> + for (int i = 0; i < nelems; i++)\n>>> ...\n>>> }\n>>>\n>>> Similar to the row filter, I think we need to use\n>>> entry->cache_expr_cxt to allocate this. There are other usages of\n>>> CacheMemoryContext in this part of the code but I think those need to\n>>> be also changed and we can do that as a separate patch. If we do the\n>>> suggested change then we don't need to separately free columns.\n>>\n>> I agree a shorter-lived context would be better than CacheMemoryContext,\n>> but \"expr\" seems to indicate it's for the expression only, so maybe we\n>> should rename that.\n>>\n> \n> Yeah, we can do that. How about rel_entry_cxt or something like that?\n> The idea is that eventually, we should move a few other things of\n> RelSyncEntry like attrmap where we are using CacheMemoryContext under\n> this context.\n> \n\nYeah, rel_entry_cxt sounds fine I guess ...\n\n>> But do we really want a memory context for every\n>> single entry?\n>>\n> \n> Any other better idea?\n> \n\nNo, I think you're right - it'd be hard/impossible to keep track of all\nthe memory allocated for expression/estate. 
It'd be fine for the\ncolumns, because that's just a bitmap, but not for the expressions.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Fri, 11 Mar 2022 05:22:14 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Mar 11, 2022 at 9:57 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\r\n>\r\nHi Tomas,\r\nThanks for your patches.\r\n\r\nOn Mon, Mar 9, 2022 at 9:53 PM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\r\n>On Wed, Mar 9, 2022 at 6:04 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\r\n>>On Mon, Mar 7, 2022 at 11:18 PM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\r\n>>>On Fri, Mar 4, 2022 at 6:43 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\r\n> >>> Fetching column filter info in tablesync.c is quite expensive. It\r\n> >>> seems to be using four round-trips to get the complete info whereas\r\n> >>> for row-filter we use just one round trip. I think we should try to\r\n> >>> get both row filter and column filter info in just one round trip.\r\n> >>>\r\n> >>\r\n> >> Maybe, but I really don't think this is an issue.\r\n> >>\r\n> >\r\n> > I am not sure but it might matter for small tables. Leaving aside the\r\n> > performance issue, I think the current way will get the wrong column\r\n> > list in many cases: (a) The ALL TABLES IN SCHEMA case handling won't\r\n> > work for partitioned tables when the partitioned table is part of one\r\n> > schema and partition table is part of another schema. 
(b) The handling\r\n> > of partition tables in other cases will fetch incorrect lists as it\r\n> > tries to fetch the column list of all the partitions in the hierarchy.\r\n> >\r\n> > One of my colleagues has even tested these cases both for column\r\n> > filters and row filters and we find the behavior of row filter is okay\r\n> > whereas for column filter it uses the wrong column list. We will share\r\n> > the tests and results with you in a later email. We are trying to\r\n> > unify the column filter queries with row filter to make their behavior\r\n> > the same and will share the findings once it is done. I hope if we are\r\n> > able to achieve this that we will reduce the chances of bugs in this\r\n> > area.\r\n> >\r\n> \r\n> OK, I'll take a look at that email.\r\nI tried to get both the column filters and the row filters with one SQL, but\r\nit failed because I think the result is not easy to parse.\r\n\r\nI noted that we use two SQLs to get column filters in the latest\r\npatches(20220311). I think maybe we could use one SQL to get column filters to\r\nreduce network cost. Like the SQL in the attachment.\r\n\r\nRegards,\r\nWang wei", "msg_date": "Fri, 11 Mar 2022 07:05:59 +0000", "msg_from": "\"wangw.fnst@fujitsu.com\" <wangw.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Mar 11, 2022 at 7:26 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/10/22 20:10, Tomas Vondra wrote:\n> >\n> >\n> > FWIW I think the reason is pretty simple - pgoutput_row_filter_init is\n> > broken. It assumes you can just do this\n> >\n> > rftuple = SearchSysCache2(PUBLICATIONRELMAP,\n> > ObjectIdGetDatum(entry->publish_as_relid),\n> > ObjectIdGetDatum(pub->oid));\n> >\n> > if (HeapTupleIsValid(rftuple))\n> > {\n> > /* Null indicates no filter. 
*/\n> > rfdatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple,\n> > Anum_pg_publication_rel_prqual,\n> > &pub_no_filter);\n> > }\n> > else\n> > {\n> > pub_no_filter = true;\n> > }\n> >\n> >\n> > and pub_no_filter=true means there's no filter at all. Which is\n> > nonsense, because we're using publish_as_relid here - the publication\n> > may not include this particular ancestor, in which case we need to just\n> > ignore this publication.\n> >\n> > So yeah, this needs to be reworked.\n> >\n>\n> I spent a bit of time looking at this, and I think a minor change in\n> get_rel_sync_entry() fixes this - it's enough to ensure rel_publications\n> only includes publications that actually include publish_as_relid.\n>\n\nThanks for looking into this. I think in the first patch before\ncalling get_partition_ancestors() we need to ensure it is a partition\n(the call expects that) and pubviaroot is true. I think it would be\ngood if we can avoid an additional call to get_partition_ancestors()\nas it could be costly. I wonder why it is not sufficient to ensure\nthat publish_as_relid exists after ancestor in ancestors list before\nassigning the ancestor to publish_as_relid? This only needs to be done\nin case of (if (!publish)). 
I have not tried this so I could be wrong.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 11 Mar 2022 15:22:19 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/11/22 08:05, wangw.fnst@fujitsu.com wrote:\n> On Fri, Mar 11, 2022 at 9:57 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n>>\n> Hi Tomas,\n> Thanks for your patches.\n> \n> On Mon, Mar 9, 2022 at 9:53 PM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n>> On Wed, Mar 9, 2022 at 6:04 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>> On Mon, Mar 7, 2022 at 11:18 PM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n>>>> On Fri, Mar 4, 2022 at 6:43 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>>>> Fetching column filter info in tablesync.c is quite expensive. It\n>>>>> seems to be using four round-trips to get the complete info whereas\n>>>>> for row-filter we use just one round trip. I think we should try to\n>>>>> get both row filter and column filter info in just one round trip.\n>>>>>\n>>>>\n>>>> Maybe, but I really don't think this is an issue.\n>>>>\n>>>\n>>> I am not sure but it might matter for small tables. Leaving aside the\n>>> performance issue, I think the current way will get the wrong column\n>>> list in many cases: (a) The ALL TABLES IN SCHEMA case handling won't\n>>> work for partitioned tables when the partitioned table is part of one\n>>> schema and partition table is part of another schema. (b) The handling\n>>> of partition tables in other cases will fetch incorrect lists as it\n>>> tries to fetch the column list of all the partitions in the hierarchy.\n>>>\n>>> One of my colleagues has even tested these cases both for column\n>>> filters and row filters and we find the behavior of row filter is okay\n>>> whereas for column filter it uses the wrong column list. We will share\n>>> the tests and results with you in a later email. 
We are trying to\n>>> unify the column filter queries with row filter to make their behavior\n>>> the same and will share the findings once it is done. I hope if we are\n>>> able to achieve this that we will reduce the chances of bugs in this\n>>> area.\n>>>\n>>\n>> OK, I'll take a look at that email.\n> I tried to get both the column filters and the row filters with one SQL, but\n> it failed because I think the result is not easy to parse.\n> \n> I noted that we use two SQLs to get column filters in the latest\n> patches(20220311). I think maybe we could use one SQL to get column filters to\n> reduce network cost. Like the SQL in the attachment.\n> \n\nI'll take a look. But as I said before - I very much prefer SQL that is\neasy to understand, and I don't think the one extra round trip is an\nissue during tablesync (which is a very rare action).\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Fri, 11 Mar 2022 13:36:11 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/11/22 10:52, Amit Kapila wrote:\n> On Fri, Mar 11, 2022 at 7:26 AM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> On 3/10/22 20:10, Tomas Vondra wrote:\n>>>\n>>>\n>>> FWIW I think the reason is pretty simple - pgoutput_row_filter_init is\n>>> broken. It assumes you can just do this\n>>>\n>>> rftuple = SearchSysCache2(PUBLICATIONRELMAP,\n>>> ObjectIdGetDatum(entry->publish_as_relid),\n>>> ObjectIdGetDatum(pub->oid));\n>>>\n>>> if (HeapTupleIsValid(rftuple))\n>>> {\n>>> /* Null indicates no filter. */\n>>> rfdatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple,\n>>> Anum_pg_publication_rel_prqual,\n>>> &pub_no_filter);\n>>> }\n>>> else\n>>> {\n>>> pub_no_filter = true;\n>>> }\n>>>\n>>>\n>>> and pub_no_filter=true means there's no filter at all. 
Which is\n>>> nonsense, because we're using publish_as_relid here - the publication\n>>> may not include this particular ancestor, in which case we need to just\n>>> ignore this publication.\n>>>\n>>> So yeah, this needs to be reworked.\n>>>\n>>\n>> I spent a bit of time looking at this, and I think a minor change in\n>> get_rel_sync_entry() fixes this - it's enough to ensure rel_publications\n>> only includes publications that actually include publish_as_relid.\n>>\n> \n> Thanks for looking into this. I think in the first patch before\n> calling get_partition_ancestors() we need to ensure it is a partition\n> (the call expects that) and pubviaroot is true.\n\nDoes the call really require that? Also, I'm not sure why we'd need to\nlook at pubviaroot - that's already considered earlier when calculating\npublish_as_relid, here we just need to know the relationship of the two\nOIDs (if one is ancestor/child of the other).\n\n> I think it would be\n> good if we can avoid an additional call to get_partition_ancestors()\n> as it could be costly.\n\nMaybe. OTOH we only should do this only very rarely anyway.\n\n> I wonder why it is not sufficient to ensure\n> that publish_as_relid exists after ancestor in ancestors list before\n> assigning the ancestor to publish_as_relid? This only needs to be done\n> in case of (if (!publish)). I have not tried this so I could be wrong.\n> \n\nI'm not sure what exactly are you proposing. Maybe try coding it? 
That's\nprobably faster than trying to describe what the code might do ...\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Fri, 11 Mar 2022 13:50:29 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Mar 11, 2022 at 6:20 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/11/22 10:52, Amit Kapila wrote:\n> > On Fri, Mar 11, 2022 at 7:26 AM Tomas Vondra\n> > <tomas.vondra@enterprisedb.com> wrote:\n> >>\n> >> On 3/10/22 20:10, Tomas Vondra wrote:\n> >>>\n> >>>\n> >>> FWIW I think the reason is pretty simple - pgoutput_row_filter_init is\n> >>> broken. It assumes you can just do this\n> >>>\n> >>> rftuple = SearchSysCache2(PUBLICATIONRELMAP,\n> >>> ObjectIdGetDatum(entry->publish_as_relid),\n> >>> ObjectIdGetDatum(pub->oid));\n> >>>\n> >>> if (HeapTupleIsValid(rftuple))\n> >>> {\n> >>> /* Null indicates no filter. */\n> >>> rfdatum = SysCacheGetAttr(PUBLICATIONRELMAP, rftuple,\n> >>> Anum_pg_publication_rel_prqual,\n> >>> &pub_no_filter);\n> >>> }\n> >>> else\n> >>> {\n> >>> pub_no_filter = true;\n> >>> }\n> >>>\n> >>>\n> >>> and pub_no_filter=true means there's no filter at all. Which is\n> >>> nonsense, because we're using publish_as_relid here - the publication\n> >>> may not include this particular ancestor, in which case we need to just\n> >>> ignore this publication.\n> >>>\n> >>> So yeah, this needs to be reworked.\n> >>>\n> >>\n> >> I spent a bit of time looking at this, and I think a minor change in\n> >> get_rel_sync_entry() fixes this - it's enough to ensure rel_publications\n> >> only includes publications that actually include publish_as_relid.\n> >>\n> >\n> > Thanks for looking into this. 
I think in the first patch before\n> > calling get_partition_ancestors() we need to ensure it is a partition\n> > (the call expects that) and pubviaroot is true.\n>\n> Does the call really require that?\n>\n\nThere may not be any harm but I have mentioned it because (a) the\ncomments atop get_partition_ancestors(...it should only be called when\nit is known that the relation is a partition.) indicates the same; (b)\nall existing callers seems to use it only for partitions.\n\n> Also, I'm not sure why we'd need to\n> look at pubviaroot - that's already considered earlier when calculating\n> publish_as_relid, here we just need to know the relationship of the two\n> OIDs (if one is ancestor/child of the other).\n>\n\nI thought of avoiding calling get_partition_ancestors when pubviaroot\nis not set. It will unnecessary check the whole hierarchy for\npartitions even when it is not required. I agree that this is not a\ncommon code path but still felt why do it needlessly?\n\n> > I think it would be\n> > good if we can avoid an additional call to get_partition_ancestors()\n> > as it could be costly.\n>\n> Maybe. OTOH we only should do this only very rarely anyway.\n>\n> > I wonder why it is not sufficient to ensure\n> > that publish_as_relid exists after ancestor in ancestors list before\n> > assigning the ancestor to publish_as_relid? This only needs to be done\n> > in case of (if (!publish)). I have not tried this so I could be wrong.\n> >\n>\n> I'm not sure what exactly are you proposing. Maybe try coding it? That's\n> probably faster than trying to describe what the code might do ...\n>\n\nOkay, please find attached. 
I have done basic testing of this, if we\nagree with this approach then this will require some more testing.\n\n-- \nWith Regards,\nAmit Kapila.", "msg_date": "Sat, 12 Mar 2022 10:00:14 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/12/22 05:30, Amit Kapila wrote:\n>> ...\n> \n> Okay, please find attached. I have done basic testing of this, if we\n> agree with this approach then this will require some more testing.\n> \n\nThanks, the proposed changes seem like a clear improvement, so I've\nadded them, with some minor tweaks (mostly to comments).\n\nI've also included the memory context rename (entry_changes to the\nchange proposed by Wang Wei, using a single SQL command in tablesync.\n\nAnd I've renamed the per-entry memory context to entry_cxt, and used it\nfor the column list.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Sun, 13 Mar 2022 22:07:49 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Mar 14, 2022 at 2:37 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/12/22 05:30, Amit Kapila wrote:\n> >> ...\n> >\n> > Okay, please find attached. I have done basic testing of this, if we\n> > agree with this approach then this will require some more testing.\n> >\n>\n> Thanks, the proposed changes seem like a clear improvement, so I've\n> added them, with some minor tweaks (mostly to comments).\n>\n\nOne minor point: Did you intentionally remove\nlist_free(rel_publications) before resetting the list from the second\npatch? 
The memory for rel_publications is allocated in\nTopTransactionContext, so a large transaction touching many relations\nwill only free this at end of the transaction which may not be a big\ndeal as we don't do this every time. We free this list a few lines\ndown in successful case so this appears slightly odd to me but I am\nfine if you think it doesn't matter.\n\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 14 Mar 2022 15:23:44 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Monday, March 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\r\n> \r\n> On 3/12/22 05:30, Amit Kapila wrote:\r\n> >> ...\r\n> >\r\n> > Okay, please find attached. I have done basic testing of this, if we\r\n> > agree with this approach then this will require some more testing.\r\n> >\r\n> \r\n> Thanks, the proposed changes seem like a clear improvement, so I've\r\n> added them, with some minor tweaks (mostly to comments).\r\n\r\nHi,\r\n\r\nThanks for updating the patches !\r\nAnd sorry for the row filter bug caused by my mistake.\r\n\r\nI looked at the two fixup patches. I am thinking would it be better if we\r\nadd one testcase for these two bugs? Maybe like the attachment.\r\n\r\n(Attach the fixup patch to make the cfbot happy)\r\n\r\nBest regards,\r\nHou zj", "msg_date": "Mon, 14 Mar 2022 11:12:25 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/14/22 10:53, Amit Kapila wrote:\n> On Mon, Mar 14, 2022 at 2:37 AM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> On 3/12/22 05:30, Amit Kapila wrote:\n>>>> ...\n>>>\n>>> Okay, please find attached. 
I have done basic testing of this, if we\n>>> agree with this approach then this will require some more testing.\n>>>\n>>\n>> Thanks, the proposed changes seem like a clear improvement, so I've\n>> added them, with some minor tweaks (mostly to comments).\n>>\n> \n> One minor point: Did you intentionally remove\n> list_free(rel_publications) before resetting the list from the second\n> patch? The memory for rel_publications is allocated in\n> TopTransactionContext, so a large transaction touching many relations\n> will only free this at end of the transaction which may not be a big\n> deal as we don't do this every time. We free this list a few lines\n> down in successful case so this appears slightly odd to me but I am\n> fine if you think it doesn't matter.\n\nThe removal was not intentional, but I don't think it's an issue exactly\nbecause it's a tiny mount of memory and we'll release it at the end of\nthe transaction. Which should not take long.\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 14 Mar 2022 13:05:24 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/14/22 12:12, houzj.fnst@fujitsu.com wrote:\n> On Monday, March 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> On 3/12/22 05:30, Amit Kapila wrote:\n>>>> ...\n>>>\n>>> Okay, please find attached. I have done basic testing of this, if we\n>>> agree with this approach then this will require some more testing.\n>>>\n>>\n>> Thanks, the proposed changes seem like a clear improvement, so I've\n>> added them, with some minor tweaks (mostly to comments).\n> \n> Hi,\n> \n> Thanks for updating the patches !\n> And sorry for the row filter bug caused by my mistake.\n> \n> I looked at the two fixup patches. 
I am thinking would it be better if we\n> add one testcase for these two bugs? Maybe like the attachment.\n> \n\nYeah, a test would be nice - I'll take a look later.\n\nAnyway, the fix does not address tablesync, as explained in [1]. I'm not\nsure what to do about it - in principle, we could calculate which\nrelations to sync, and then eliminate \"duplicates\" (i.e. relations where\nwe are going to sync an ancestor).\n\n\nregards\n\n[1]\nhttps://www.postgresql.org/message-id/822a8e40-287c-59ff-0ea9-35eb759f4fe6%40enterprisedb.com\n\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 14 Mar 2022 13:11:58 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Mar 14, 2022 at 5:42 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/14/22 12:12, houzj.fnst@fujitsu.com wrote:\n> > On Monday, March 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n>\n> Anyway, the fix does not address tablesync, as explained in [1]. I'm not\n> sure what to do about it - in principle, we could calculate which\n> relations to sync, and then eliminate \"duplicates\" (i.e. relations where\n> we are going to sync an ancestor).\n>\n\nAs mentioned in my previous email [1], this appears to be a base code\nissue (even without row filter or column filter work), so it seems\nbetter to deal with it separately. 
It has been reported separately as\nwell [2] where we found some similar issues.\n\n\n[1] - https://www.postgresql.org/message-id/CAA4eK1LSb-xrvGEm3ShaRA%3DMkdii2d%2B4vqh9DGPvVDA%2BD9ibYw%40mail.gmail.com\n[2] - https://www.postgresql.org/message-id/OS0PR01MB5716DC2982CC735FDE388804940B9@OS0PR01MB5716.jpnprd01.prod.outlook.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 14 Mar 2022 18:17:10 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/14/22 13:47, Amit Kapila wrote:\n> On Mon, Mar 14, 2022 at 5:42 PM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> On 3/14/22 12:12, houzj.fnst@fujitsu.com wrote:\n>>> On Monday, March 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> Anyway, the fix does not address tablesync, as explained in [1]. I'm not\n>> sure what to do about it - in principle, we could calculate which\n>> relations to sync, and then eliminate \"duplicates\" (i.e. relations where\n>> we are going to sync an ancestor).\n>>\n> \n> As mentioned in my previous email [1], this appears to be a base code\n> issue (even without row filter or column filter work), so it seems\n> better to deal with it separately. It has been reported separately as\n> well [2] where we found some similar issues.\n> \n\nRight. I don't want to be waiting for that fix either, that'd block this\npatch unnecessarily. If there are no other comments, I'll go ahead,\npolish the existing patches a bit more and get them committed. 
We can\nworry about this pre-existing issue later.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 14 Mar 2022 14:32:53 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Mar 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\r\n> \r\n> On 3/12/22 05:30, Amit Kapila wrote:\r\n> >> ...\r\n> >\r\n> > Okay, please find attached. I have done basic testing of this, if we\r\n> > agree with this approach then this will require some more testing.\r\n> >\r\n> \r\n> Thanks, the proposed changes seem like a clear improvement, so I've\r\n> added them, with some minor tweaks (mostly to comments).\r\n> \r\n> I've also included the memory context rename (entry_changes to the\r\n> change proposed by Wang Wei, using a single SQL command in tablesync.\r\n> \r\n> And I've renamed the per-entry memory context to entry_cxt, and used it\r\n> for the column list.\r\n> \r\n\r\nThanks for your patch.\r\nHere are some comments for column filter main patch (0003 patch).\r\n\r\n1. doc/src/sgml/catalogs.sgml\r\n@@ -6263,6 +6263,19 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l\r\n Reference to schema\r\n </para></entry>\r\n </row>\r\n+\r\n+ <row>\r\n+ <entry role=\"catalog_table_entry\"><para role=\"column_definition\">\r\n+ <structfield>prattrs</structfield> <type>int2vector</type>\r\n+ (references <link linkend=\"catalog-pg-attribute\"><structname>pg_attribute</structname></link>.<structfield>attnum</structfield>)\r\n+ </para>\r\n+ <para>\r\n+ This is an array of values that indicates which table columns are\r\n+ part of the publication. 
For example, a value of <literal>1 3</literal>\r\n+ would mean that the first and the third table columns are published.\r\n+ A null value indicates that all columns are published.\r\n+ </para></entry>\r\n+ </row>\r\n </tbody>\r\n </tgroup>\r\n </table>\r\n\r\nThis change was added to pg_publication_namespace view. I think it should be\r\nadded to pg_publication_rel view, right?\r\n\r\n2. src/backend/replication/pgoutput/pgoutput.c\r\n@@ -188,6 +202,7 @@ static EState *create_estate_for_relation(Relation rel);\r\n static void pgoutput_row_filter_init(PGOutputData *data,\r\n \t\t\t\t\t\t\t\t\t List *publications,\r\n \t\t\t\t\t\t\t\t\t RelationSyncEntry *entry);\r\n+\r\n static bool pgoutput_row_filter_exec_expr(ExprState *state,\r\n \t\t\t\t\t\t\t\t\t\t ExprContext *econtext);\r\n static bool pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot,\r\n\r\nShould we remove this change?\r\n\r\n3. src/backend/commands/publicationcmds.c\r\n+/*\r\n+ * Check if all columns referenced in the column list are part of the\r\n+ * REPLICA IDENTITY index or not.\r\n+ *\r\n+ * Returns true if any invalid column is found.\r\n+ */\r\n\r\nThe comment for pub_collist_contains_invalid_column() seems wrong. Should it be\r\n\"Check if all REPLICA IDENTITY columns are covered by the column list or not\"?\r\n\r\n4.\r\nThe patch doesn't allow delete and update operations if the target table uses\r\nreplica identity full and it is published with column list specified, even if\r\ncolumn list includes all columns in the table.\r\n\r\nFor example:\r\ncreate table tbl (a int, b int, c int);\r\ncreate publication pub for table tbl (a, b, c);\r\nalter table tbl replica identity full;\r\n\r\npostgres=# delete from tbl;\r\nERROR: cannot delete from table \"tbl\"\r\nDETAIL: Column list used by the publication does not cover the replica identity.\r\n\r\nShould we allow this case? I think it doesn't seem to cause harm.\r\n\r\n5. 
\r\nMaybe we need some changes for tab-complete.c.\r\n\r\nRegards,\r\nShi yu\r\n", "msg_date": "Tue, 15 Mar 2022 02:08:50 +0000", "msg_from": "\"shiy.fnst@fujitsu.com\" <shiy.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Mar 14, 2022 at 4:42 PM houzj.fnst@fujitsu.com\n<houzj.fnst@fujitsu.com> wrote:\n>\n> On Monday, March 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n> >\n> > On 3/12/22 05:30, Amit Kapila wrote:\n> > >> ...\n> > >\n> > > Okay, please find attached. I have done basic testing of this, if we\n> > > agree with this approach then this will require some more testing.\n> > >\n> >\n> > Thanks, the proposed changes seem like a clear improvement, so I've\n> > added them, with some minor tweaks (mostly to comments).\n>\n> Hi,\n>\n> Thanks for updating the patches !\n> And sorry for the row filter bug caused by my mistake.\n>\n> I looked at the two fixup patches. I am thinking would it be better if we\n> add one testcase for these two bugs? Maybe like the attachment.\n>\n\nYour tests look good to me. We might want to add some comments for\neach test but I guess that can be done before committing. Tomas, it\nseems you are planning to push these bug fixes, do let me know if you\nwant me to take care of these while you focus on the main patch? 
I\nthink the first patch needs to be backpatched till 13 and the second\none is for just HEAD.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 15 Mar 2022 10:13:53 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Mar 14, 2022 at 7:02 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/14/22 13:47, Amit Kapila wrote:\n> > On Mon, Mar 14, 2022 at 5:42 PM Tomas Vondra\n> > <tomas.vondra@enterprisedb.com> wrote:\n> >>\n> >> On 3/14/22 12:12, houzj.fnst@fujitsu.com wrote:\n> >>> On Monday, March 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n> >>\n> >> Anyway, the fix does not address tablesync, as explained in [1]. I'm not\n> >> sure what to do about it - in principle, we could calculate which\n> >> relations to sync, and then eliminate \"duplicates\" (i.e. relations where\n> >> we are going to sync an ancestor).\n> >>\n> >\n> > As mentioned in my previous email [1], this appears to be a base code\n> > issue (even without row filter or column filter work), so it seems\n> > better to deal with it separately. It has been reported separately as\n> > well [2] where we found some similar issues.\n> >\n>\n> Right. I don't want to be waiting for that fix either, that'd block this\n> patch unnecessarily. If there are no other comments, I'll go ahead,\n> polish the existing patches a bit more and get them committed. We can\n> worry about this pre-existing issue later.\n>\n\nI think the first two patches are ready to go. 
I haven't read the\nlatest version in detail but I have in mind that we want to get this\nin for PG-15.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 15 Mar 2022 10:16:19 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/15/22 05:43, Amit Kapila wrote:\n> On Mon, Mar 14, 2022 at 4:42 PM houzj.fnst@fujitsu.com\n> <houzj.fnst@fujitsu.com> wrote:\n>>\n>> On Monday, March 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n>>>\n>>> On 3/12/22 05:30, Amit Kapila wrote:\n>>>>> ...\n>>>>\n>>>> Okay, please find attached. I have done basic testing of this, if we\n>>>> agree with this approach then this will require some more testing.\n>>>>\n>>>\n>>> Thanks, the proposed changes seem like a clear improvement, so I've\n>>> added them, with some minor tweaks (mostly to comments).\n>>\n>> Hi,\n>>\n>> Thanks for updating the patches !\n>> And sorry for the row filter bug caused by my mistake.\n>>\n>> I looked at the two fixup patches. I am thinking would it be better if we\n>> add one testcase for these two bugs? Maybe like the attachment.\n>>\n> \n> Your tests look good to me. We might want to add some comments for\n> each test but I guess that can be done before committing. Tomas, it\n> seems you are planning to push these bug fixes, do let me know if you\n> want me to take care of these while you focus on the main patch? I\n> think the first patch needs to be backpatched till 13 and the second\n> one is for just HEAD.\n> \n\nYeah, I plan to push the fixes later today. 
I'll polish them a bit\nfirst, and merge the tests (shared by Hou zj) into the patches etc.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Tue, 15 Mar 2022 09:30:44 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Mar 15, 2022 at 7:38 AM shiy.fnst@fujitsu.com\n<shiy.fnst@fujitsu.com> wrote:\n>\n> On Mon, Mar 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n>\n> 3. src/backend/commands/publicationcmds.c\n> +/*\n> + * Check if all columns referenced in the column list are part of the\n> + * REPLICA IDENTITY index or not.\n> + *\n> + * Returns true if any invalid column is found.\n> + */\n>\n> The comment for pub_collist_contains_invalid_column() seems wrong. Should it be\n> \"Check if all REPLICA IDENTITY columns are covered by the column list or not\"?\n>\n\nOn similar lines, I think errdetail for below messages need to be changed.\nereport(ERROR,\n+ (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),\n+ errmsg(\"cannot update table \\\"%s\\\"\",\n+ RelationGetRelationName(rel)),\n+ errdetail(\"Column list used by the publication does not cover the\nreplica identity.\")));\n else if (cmd == CMD_DELETE && !pubdesc.rf_valid_for_delete)\n ereport(ERROR,\n (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),\n errmsg(\"cannot delete from table \\\"%s\\\"\",\n RelationGetRelationName(rel)),\n errdetail(\"Column used in the publication WHERE expression is not\npart of the replica identity.\")));\n+ else if (cmd == CMD_DELETE && !pubdesc.cols_valid_for_delete)\n+ ereport(ERROR,\n+ (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),\n+ errmsg(\"cannot delete from table \\\"%s\\\"\",\n+ RelationGetRelationName(rel)),\n+ errdetail(\"Column list used by the publication does not cover the\nreplica identity.\")));\n\nSome assorted 
comments:\n========================\n1. As mentioned previously as well[1], the change in ATExecDropColumn\nis not required. Similarly, the change you seem to agree upon in\nlogicalrep_write_update[2] doesn't seem to be present.\n\n2. I think the dependency handling in publication_set_table_columns()\nhas problems. While removing existing dependencies, it uses\nPublicationRelationId as classId whereas while adding new dependencies\nit uses PublicationRelRelationId as classId. This will create problems\nwhile removing columns from table. For example,\npostgres=# create table t1(c1 int, c2 int, c3 int);\nCREATE TABLE\npostgres=# create publication pub1 for table t1(c1, c2);\nCREATE PUBLICATION\npostgres=# select * from pg_depend where classid = 6106 or refclassid\n= 6106 or classid = 6104;\n classid | objid | objsubid | refclassid | refobjid | refobjsubid | deptype\n---------+-------+----------+------------+----------+-------------+---------\n 6106 | 16409 | 0 | 1259 | 16405 | 1 | a\n 6106 | 16409 | 0 | 1259 | 16405 | 2 | a\n 6106 | 16409 | 0 | 6104 | 16408 | 0 | a\n 6106 | 16409 | 0 | 1259 | 16405 | 0 | a\n(4 rows)\n\nTill here everything is fine.\n\npostgres=# Alter publication pub1 alter table t1 set columns(c2);\nALTER PUBLICATION\npostgres=# select * from pg_depend where classid = 6106 or refclassid\n= 6106 or classid = 6104;\n classid | objid | objsubid | refclassid | refobjid | refobjsubid | deptype\n---------+-------+----------+------------+----------+-------------+---------\n 6106 | 16409 | 0 | 1259 | 16405 | 1 | a\n 6106 | 16409 | 0 | 1259 | 16405 | 2 | a\n 6106 | 16409 | 0 | 6104 | 16408 | 0 | a\n 6106 | 16409 | 0 | 1259 | 16405 | 0 | a\n 6106 | 16409 | 0 | 1259 | 16405 | 2 | a\n(5 rows)\n\nNow without removing dependencies for columns 1 and 2, it added a new\ndependency for column 2.\n\n3.\n@@ -930,8 +1054,24 @@ copy_table(Relation rel)\n...\n+ for (int i = 0; i < lrel.natts; i++)\n+ {\n+ if (i > 0)\n+ appendStringInfoString(&cmd, \", \");\n+\n+ 
appendStringInfoString(&cmd, quote_identifier(lrel.attnames[i]));\n+ }\n...\n...\nfor (int i = 0; i < lrel.natts; i++)\n{\nappendStringInfoString(&cmd, quote_identifier(lrel.attnames[i]));\nif (i < lrel.natts - 1)\nappendStringInfoString(&cmd, \", \");\n}\n\nIn the same function, we use two different styles to achieve the same\nthing. I think it is better to use the same style (probably existing)\nat both places for the sake of consistency.\n\n4.\n+ <para>\n+ The <literal>ALTER TABLE ... SET COLUMNS</literal> variant allows changing\n+ the set of columns that are included in the publication. If a column list\n+ is specified, it must include the replica identity columns.\n+ </para>\n\nI think the second part holds true only for update/delete publications.\n\n5.\n+ * XXX Should this detect duplicate columns?\n+ */\n+static void\n+publication_translate_columns(Relation targetrel, List *columns,\n+ int *natts, AttrNumber **attrs)\n{\n...\n+ if (bms_is_member(attnum, set))\n+ ereport(ERROR,\n+ errcode(ERRCODE_DUPLICATE_OBJECT),\n+ errmsg(\"duplicate column \\\"%s\\\" in publication column list\",\n+ colname));\n...\n}\n\nIt seems we already detect duplicate columns in this function. So XXX\npart of the comment doesn't seem to be required.\n\n6.\n+ * XXX The name is a bit misleading, because we don't really transform\n+ * anything here - we merely check the column list is compatible with the\n+ * definition of the publication (with publish_via_partition_root=false)\n+ * we only allow column lists on the leaf relations. So maybe rename it?\n+ */\n+static void\n+TransformPubColumnList(List *tables, const char *queryString,\n+ bool pubviaroot)\n\nThe second parameter is not used in this function. As noted in the\ncomments, I also think it is better to rename this. How about\nValidatePubColumnList?\n\n7.\n+ /*\n+ * FIXME check pubactions vs. replica identity, to ensure the replica\n+ * identity is included in the column list. 
Only do this for update\n+ * and delete publications. See check_publication_columns.\n+ *\n+ * XXX This is needed because publish_via_partition_root may change,\n+ * in which case the row filters may be invalid (e.g. with pvpr=false\n+ * there must be no filter on partitioned tables).\n+ */\n+\n\nThis entire comment doesn't seem to be required.\n\n8.\n+publication_set_table_columns()\n{\n...\n+ /* XXX \"pub\" is leaked here ??? */\n...\n}\n\nIt is not clear what this means?\n\n9.\n+ * ALTER PUBLICATION name SET COLUMNS table_name (column[, ...])\n+ *\n+ * ALTER PUBLICATION name SET COLUMNS table_name ALL\n+ *\n * pub_obj is one of:\n *\n * TABLE table_name [, ...]\n@@ -9869,6 +9878,32 @@ AlterPublicationStmt:\n n->action = AP_SetObjects;\n $$ = (Node *)n;\n }\n+ | ALTER PUBLICATION name ALTER TABLE relation_expr SET COLUMNS '('\ncolumnList ')'\n\nThe comments in gram.y indicates different rules than the actual implementation.\n\n10.\n+ *\n+ * FIXME Do we need something similar for column filters?\n */\n enum RowFilterPubAction\n\nI have thought about this point and it seems we don't need anything on\nthis front for this patch. 
We need the filter combining of\nupdate/delete for row filter because if inserts have some column which\nis not present in RI then during update filtering it can give an error\nas the column won't be present in WAL log.\n\nNow, the same problem won't be there for the column list/filter patch\nbecause all the RI columns are there in the column list (for\nupdate/delete) and we don't need to apply a column filter for old\ntuples in either update or delete.\n\nWe can remove this FIXME.\n\n11.\n+ } /* loop all subscribed publications */\n+\n+}\n\nNo need for an empty line here.\n\n[1] - https://www.postgresql.org/message-id/CAA4eK1K5pkrPT9z5TByUPptExian5c18g6GnfNf9Cr97QdPbjw%40mail.gmail.com\n[2] - https://www.postgresql.org/message-id/43c15aa8-aa15-ca0f-40e4-3be68d98df05%40enterprisedb.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 16 Mar 2022 16:57:43 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/15/22 09:30, Tomas Vondra wrote:\n> \n> \n> On 3/15/22 05:43, Amit Kapila wrote:\n>> On Mon, Mar 14, 2022 at 4:42 PM houzj.fnst@fujitsu.com\n>> <houzj.fnst@fujitsu.com> wrote:\n>>>\n>>> On Monday, March 14, 2022 5:08 AM Tomas Vondra <tomas.vondra@enterprisedb.com> wrote:\n>>>>\n>>>> On 3/12/22 05:30, Amit Kapila wrote:\n>>>>>> ...\n>>>>>\n>>>>> Okay, please find attached. I have done basic testing of this, if we\n>>>>> agree with this approach then this will require some more testing.\n>>>>>\n>>>>\n>>>> Thanks, the proposed changes seem like a clear improvement, so I've\n>>>> added them, with some minor tweaks (mostly to comments).\n>>>\n>>> Hi,\n>>>\n>>> Thanks for updating the patches !\n>>> And sorry for the row filter bug caused by my mistake.\n>>>\n>>> I looked at the two fixup patches. I am thinking would it be better if we\n>>> add one testcase for these two bugs? 
Maybe like the attachment.\n>>>\n>>\n>> Your tests look good to me. We might want to add some comments for\n>> each test but I guess that can be done before committing. Tomas, it\n>> seems you are planning to push these bug fixes, do let me know if you\n>> want me to take care of these while you focus on the main patch? I\n>> think the first patch needs to be backpatched till 13 and the second\n>> one is for just HEAD.\n>>\n> \n> Yeah, I plan to push the fixes later today. I'll polish them a bit\n> first, and merge the tests (shared by Hou zj) into the patches etc.\n> \n\nI've pushed (and backpatched to 13+) the fix for the publish_as_relid\nissue, including the test. I tweaked the test a bit, to check both\norderings of the publication list.\n\nWhile doing that, I discovered yet ANOTHER bug in the publish_as_relid\nloop, affecting 12+13. There was a break once all actions were\nreplicated, but skipping additional publications ignores the fact that\nthe publications may replicate a different (higher-up) ancestor.\n\nI removed the break, if anyone thinks this optimization is worth it we\ncould still do that once we replicate the top-most ancestor.\n\n\nI'll push the second fix soon.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Wed, 16 Mar 2022 18:24:58 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "I notice that the publication.sql regression tests contain a number of \ncomments like\n\n+-- error: replica identity \"a\" not included in the column list\n+ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c);\n\nbut the error doesn't actually happen, because of the way the replica \nidentity checking was changed. 
This needs to be checked again.\n\n\n", "msg_date": "Thu, 17 Mar 2022 15:17:29 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/17/22 15:17, Peter Eisentraut wrote:\n> I notice that the publication.sql regression tests contain a number of\n> comments like\n> \n> +-- error: replica identity \"a\" not included in the column list\n> +ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c);\n> \n> but the error doesn't actually happen, because of the way the replica\n> identity checking was changed.  This needs to be checked again.\n\nBut the comment describes the error for the whole block, which looks\nlike this:\n\n-- error: replica identity \"a\" not included in the column list\nALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c);\nUPDATE testpub_tbl5 SET a = 1;\nERROR: cannot update table \"testpub_tbl5\"\nDETAIL: Column list used by the publication does not cover the replica\nidentity.\n\nSo IMHO the comment is correct.\n\nBut there was one place where it wasn't entirely clear, as the block was\nsplit by another comment. So I tweaked it to:\n\n-- error: change the replica identity to \"b\", and column list to (a, c)\n-- then update fails, because (a, c) does not cover replica identity\n\nAttached is a rebased patch, on top of the two fixes I pushed.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Thu, 17 Mar 2022 20:11:32 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "I pushed the second fix. Interestingly enough, wrasse failed in the\n013_partition test. 
I don't see how that could be caused by this\nparticular commit, though - see the pgsql-committers thread [1].\n\nI'd like to test & polish the main patch over the weekend, and get it\ncommitted early next week. Unless someone thinks it's definitely not\nready for that ...\n\n\n[1]\nhttps://www.postgresql.org/message-id/E1nUsch-0008rQ-FW%40gemulon.postgresql.org\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Thu, 17 Mar 2022 20:17:55 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Mar 18, 2022 at 12:47 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> I pushed the second fix. Interestingly enough, wrasse failed in the\n> 013_partition test. I don't see how that could be caused by this\n> particular commit, though - see the pgsql-committers thread [1].\n>\n\nI have a theory about what's going on here. I think this is due to a\ntest added in your previous commit c91f71b9dc. 
The newly added test\nadded hangs in tablesync because there was no apply worker to set the\nstate to SUBREL_STATE_CATCHUP which blocked tablesync workers from\nproceeding.\n\nSee below logs from pogona [1].\n2022-03-18 01:33:15.190 CET [2551176][client\nbackend][3/74:0][013_partition.pl] LOG: statement: ALTER SUBSCRIPTION\nsub2 SET PUBLICATION pub_lower_level, pub_all\n2022-03-18 01:33:15.354 CET [2551193][logical replication\nworker][4/57:0][] LOG: logical replication apply worker for\nsubscription \"sub2\" has started\n2022-03-18 01:33:15.605 CET [2551176][client\nbackend][:0][013_partition.pl] LOG: disconnection: session time:\n0:00:00.415 user=bf database=postgres host=[local]\n2022-03-18 01:33:15.607 CET [2551209][logical replication\nworker][3/76:0][] LOG: logical replication table synchronization\nworker for subscription \"sub2\", table \"tab4_1\" has started\n2022-03-18 01:33:15.609 CET [2551211][logical replication\nworker][5/11:0][] LOG: logical replication table synchronization\nworker for subscription \"sub2\", table \"tab3\" has started\n2022-03-18 01:33:15.617 CET [2551193][logical replication\nworker][4/62:0][] LOG: logical replication apply worker for\nsubscription \"sub2\" will restart because of a parameter change\n\nYou will notice that the apply worker is never restarted after a\nparameter change. The reason was that the particular subscription\nreaches the limit of max_sync_workers_per_subscription after which we\ndon't allow to restart the apply worker. I think you might want to\nincrease the values of\nmax_sync_workers_per_subscription/max_logical_replication_workers to\nmake it work.\n\n> I'd like to test & polish the main patch over the weekend, and get it\n> committed early next week. Unless someone thinks it's definitely not\n> ready for that ...\n>\n\nI think it is in good shape but apart from cleanup, there are issues\nwith dependency handling which I have analyzed and reported as one of\nthe comments in the email [2]. 
I was getting some weird behavior\nduring my testing due to that. Apart from that still the patch has DDL\nhandling code in tablecmds.c which probably is not required.\nSimilarly, Shi-San has reported an issue with replica full in her\nemail [3]. It is up to you what to do here but it would be good if you\ncan once share the patch after fixing these issues so that we can\nre-test/review it.\n\n\n[1] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=pogona&dt=2022-03-17%2023%3A10%3A04\n[2] - https://www.postgresql.org/message-id/CAA4eK1KR%2ByUQquK0Bx9uO3eb5xB1e0rAD9xKf-ddm5nSf4WfNg%40mail.gmail.com\n[3] - https://www.postgresql.org/message-id/TYAPR01MB6315D664D926EF66DD6E91FCFD109%40TYAPR01MB6315.jpnprd01.prod.outlook.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 18 Mar 2022 11:22:48 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/18/22 06:52, Amit Kapila wrote:\n> On Fri, Mar 18, 2022 at 12:47 AM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> I pushed the second fix. Interestingly enough, wrasse failed in the\n>> 013_partition test. I don't see how that could be caused by this\n>> particular commit, though - see the pgsql-committers thread [1].\n>>\n> \n> I have a theory about what's going on here. I think this is due to a\n> test added in your previous commit c91f71b9dc. 
The newly added test\n> added hangs in tablesync because there was no apply worker to set the\n> state to SUBREL_STATE_CATCHUP which blocked tablesync workers from\n> proceeding.\n> \n> See below logs from pogona [1].\n> 2022-03-18 01:33:15.190 CET [2551176][client\n> backend][3/74:0][013_partition.pl] LOG: statement: ALTER SUBSCRIPTION\n> sub2 SET PUBLICATION pub_lower_level, pub_all\n> 2022-03-18 01:33:15.354 CET [2551193][logical replication\n> worker][4/57:0][] LOG: logical replication apply worker for\n> subscription \"sub2\" has started\n> 2022-03-18 01:33:15.605 CET [2551176][client\n> backend][:0][013_partition.pl] LOG: disconnection: session time:\n> 0:00:00.415 user=bf database=postgres host=[local]\n> 2022-03-18 01:33:15.607 CET [2551209][logical replication\n> worker][3/76:0][] LOG: logical replication table synchronization\n> worker for subscription \"sub2\", table \"tab4_1\" has started\n> 2022-03-18 01:33:15.609 CET [2551211][logical replication\n> worker][5/11:0][] LOG: logical replication table synchronization\n> worker for subscription \"sub2\", table \"tab3\" has started\n> 2022-03-18 01:33:15.617 CET [2551193][logical replication\n> worker][4/62:0][] LOG: logical replication apply worker for\n> subscription \"sub2\" will restart because of a parameter change\n> \n> You will notice that the apply worker is never restarted after a\n> parameter change. The reason was that the particular subscription\n> reaches the limit of max_sync_workers_per_subscription after which we\n> don't allow to restart the apply worker. I think you might want to\n> increase the values of\n> max_sync_workers_per_subscription/max_logical_replication_workers to\n> make it work.\n> \n\nHmmm. So the theory is that in most runs we manage to sync the tables\nfaster than starting the workers, so we don't hit the limit. But on some\nmachines the sync worker takes a bit longer, we hit the limit. Seems\npossible, yes. 
Unfortunately we don't seem to log anything when we hit\nthe limit, so hard to say for sure :-( I suggest we add a WARNING\nmessage to logicalrep_worker_launch or something. Not just because of\nthis test, it seems useful in general.\n\nHowever, how come we don't retry the sync? Surely we don't just give up\nforever, that'd be a pretty annoying behavior. Presumably we just end up\nsleeping for a long time before restarting the sync worker, somewhere.\n\n>> I'd like to test & polish the main patch over the weekend, and get it\n>> committed early next week. Unless someone thinks it's definitely not\n>> ready for that ...\n>>\n> \n> I think it is in good shape but apart from cleanup, there are issues\n> with dependency handling which I have analyzed and reported as one of\n> the comments in the email [2]. I was getting some weird behavior\n> during my testing due to that. Apart from that still the patch has DDL\n> handling code in tablecmds.c which probably is not required.\n> Similarly, Shi-San has reported an issue with replica full in her\n> email [3]. It is up to you what to do here but it would be good if you\n> can once share the patch after fixing these issues so that we can\n> re-test/review it.\n\nAh, thanks for reminding me - it's hard to keep track of all the issues\nin threads as long as this one.\n\nBTW do you have any opinion on the SET COLUMNS syntax? Peter Smith\nproposed to get rid of it in [1] but I'm not sure that's a good idea.\nBecause if we ditch it, then removing the column list would look like this:\n\n ALTER PUBLICATION pub ALTER TABLE tab;\n\nAnd if we happen to add other per-table options, this would become\npretty ambiguous.\n\nActually, do we even want to allow resetting column lists like this? We\ndon't allow this for row filters, so if you want to change a row filter\nyou have to re-add the table, right? 
So maybe we should just ditch ALTER\nTABLE entirely.\n\nregards\n\n[4]\nhttps://www.postgresql.org/message-id/CAHut%2BPtc7Rh187eQKrxdUmUNWyfxz7OkhYAX%3DAW411Qwxya0LQ%40mail.gmail.com\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Fri, 18 Mar 2022 15:43:41 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/18/22 15:43, Tomas Vondra wrote:\n> \n> \n> On 3/18/22 06:52, Amit Kapila wrote:\n>> On Fri, Mar 18, 2022 at 12:47 AM Tomas Vondra\n>> <tomas.vondra@enterprisedb.com> wrote:\n>>>\n>>> I pushed the second fix. Interestingly enough, wrasse failed in the\n>>> 013_partition test. I don't see how that could be caused by this\n>>> particular commit, though - see the pgsql-committers thread [1].\n>>>\n>>\n>> I have a theory about what's going on here. I think this is due to a\n>> test added in your previous commit c91f71b9dc. 
The newly added test\n>> added hangs in tablesync because there was no apply worker to set the\n>> state to SUBREL_STATE_CATCHUP which blocked tablesync workers from\n>> proceeding.\n>>\n>> See below logs from pogona [1].\n>> 2022-03-18 01:33:15.190 CET [2551176][client\n>> backend][3/74:0][013_partition.pl] LOG: statement: ALTER SUBSCRIPTION\n>> sub2 SET PUBLICATION pub_lower_level, pub_all\n>> 2022-03-18 01:33:15.354 CET [2551193][logical replication\n>> worker][4/57:0][] LOG: logical replication apply worker for\n>> subscription \"sub2\" has started\n>> 2022-03-18 01:33:15.605 CET [2551176][client\n>> backend][:0][013_partition.pl] LOG: disconnection: session time:\n>> 0:00:00.415 user=bf database=postgres host=[local]\n>> 2022-03-18 01:33:15.607 CET [2551209][logical replication\n>> worker][3/76:0][] LOG: logical replication table synchronization\n>> worker for subscription \"sub2\", table \"tab4_1\" has started\n>> 2022-03-18 01:33:15.609 CET [2551211][logical replication\n>> worker][5/11:0][] LOG: logical replication table synchronization\n>> worker for subscription \"sub2\", table \"tab3\" has started\n>> 2022-03-18 01:33:15.617 CET [2551193][logical replication\n>> worker][4/62:0][] LOG: logical replication apply worker for\n>> subscription \"sub2\" will restart because of a parameter change\n>>\n>> You will notice that the apply worker is never restarted after a\n>> parameter change. The reason was that the particular subscription\n>> reaches the limit of max_sync_workers_per_subscription after which we\n>> don't allow to restart the apply worker. I think you might want to\n>> increase the values of\n>> max_sync_workers_per_subscription/max_logical_replication_workers to\n>> make it work.\n>>\n> \n> Hmmm. So the theory is that in most runs we manage to sync the tables\n> faster than starting the workers, so we don't hit the limit. But on some\n> machines the sync worker takes a bit longer, we hit the limit. Seems\n> possible, yes. 
Unfortunately we don't seem to log anything when we hit\n> the limit, so hard to say for sure :-( I suggest we add a WARNING\n> message to logicalrep_worker_launch or something. Not just because of\n> this test, it seems useful in general.\n> \n> However, how come we don't retry the sync? Surely we don't just give up\n> forever, that'd be a pretty annoying behavior. Presumably we just end up\n> sleeping for a long time before restarting the sync worker, somewhere.\n> \n\nI tried lowering the max_sync_workers_per_subscription to 1 and making\nthe workers to run for a couple seconds (doing some CPU intensive\nstuff), but everything still works just fine.\n\nLooking a bit closer at the logs (from pogona and other), I doubt this\nis about hitting the max_sync_workers_per_subscription limit. Notice we\nstart two sync workers, but neither of them ever completes. So we never\nupdate the sync status or start syncing the remaining tables.\n\nSo the question is why those two sync workers never complete - I guess\nthere's some sort of lock wait (deadlock?) or infinite loop.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Fri, 18 Mar 2022 18:12:20 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/18/22 15:43, Tomas Vondra wrote:\n> \n> \n> On 3/18/22 06:52, Amit Kapila wrote:\n>>\n>> ...\n>> I think it is in good shape but apart from cleanup, there are issues\n>> with dependency handling which I have analyzed and reported as one of\n>> the comments in the email [2]. I was getting some weird behavior\n>> during my testing due to that. Apart from that still the patch has DDL\n>> handling code in tablecmds.c which probably is not required.\n>> Similarly, Shi-San has reported an issue with replica full in her\n>> email [3]. 
It is up to you what to do here but it would be good if you\n>> can once share the patch after fixing these issues so that we can\n>> re-test/review it.\n> \n> Ah, thanks for reminding me - it's hard to keep track of all the issues\n> in threads as long as this one.\n> \n\nAttached is an updated patch, hopefully addressing these issues.\n\nFirstly, I've reverted the changes in tablecmds.c, instead relying on\nregular dependency behavior. I've also switched from DEPENDENCY_AUTO to\nDEPENDENCY_NORMAL. This makes the code simpler, and the behavior should\nbe the same as for row filters, which makes it more consistent.\n\nAs for the SET COLUMNS breaking behaviors, I've decided to drop this\nfeature entirely, for the reasons outlined earlier today. We don't have\nthat for row filters either, etc. This means the dependency issue simply\ndisappears.\n\nWithout SET COLUMNS, if you want to change the column list you have to\nremove the table from the subscription, and add it back (with the new\ncolumn list). Perhaps inconvenient, but the behavior is clearly defined.\nMaybe we need a more convenient way to tweak column lists, but I'd say\nwe should have the same thing for row filters too.\n\n\nAs for the issue reported by Shi-San about replica identity full and\ncolumn filters, presumably you're referring to this:\n\n create table tbl (a int, b int, c int);\n create publication pub for table tbl (a, b, c);\n alter table tbl replica identity full;\n\n postgres=# delete from tbl;\n ERROR: cannot delete from table \"tbl\"\n DETAIL: Column list used by the publication does not cover the\n replica identity.\n\nI believe not allowing column lists with REPLICA IDENTITY FULL is\nexpected / correct behavior. I mean, for that to work the column list\nhas to always include all columns anyway, so it's pretty pointless. 
Of\ncourse, we might check that the column list contains everything, but\nconsidering the list does always have to contain all columns, and it\nbreak as soon as you add any columns, it seems reasonable (cheaper) to\njust require no column lists.\n\nI also went through the patch and made the naming more consistent. The\ncomments used both \"column filter\" and \"column list\" randomly, and I\nthink the agreement is to use \"list\" so I adopted that wording.\n\n\nHowever, while looking at how pgoutput, I realized one thing - for row\nfilters we track them \"per operation\", depending on which operations are\ndefined for a given publication. Shouldn't we do the same thing for\ncolumn lists, really?\n\nI mean, if there are two publications with different column lists, one\nfor inserts and the other one for updates, isn't it wrong to merge these\ntwo column lists?\n\nAlso, doesn't this mean publish_as_relid should be \"per operation\" too?\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Fri, 18 Mar 2022 23:26:39 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Fix a compiler warning reported by cfbot.\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Sat, 19 Mar 2022 18:11:38 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/19/22 18:11, Tomas Vondra wrote:\n> Fix a compiler warning reported by cfbot.\n\nApologies, I failed to actually commit the fix. 
So here we go again.\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Sat, 19 Mar 2022 18:41:15 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Mar 18, 2022 at 10:42 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/18/22 15:43, Tomas Vondra wrote:\n> >>\n> >\n> > Hmmm. So the theory is that in most runs we manage to sync the tables\n> > faster than starting the workers, so we don't hit the limit. But on some\n> > machines the sync worker takes a bit longer, we hit the limit. Seems\n> > possible, yes. Unfortunately we don't seem to log anything when we hit\n> > the limit, so hard to say for sure :-( I suggest we add a WARNING\n> > message to logicalrep_worker_launch or something. Not just because of\n> > this test, it seems useful in general.\n> >\n> > However, how come we don't retry the sync? Surely we don't just give up\n> > forever, that'd be a pretty annoying behavior. Presumably we just end up\n> > sleeping for a long time before restarting the sync worker, somewhere.\n> >\n>\n> I tried lowering the max_sync_workers_per_subscription to 1 and making\n> the workers to run for a couple seconds (doing some CPU intensive\n> stuff), but everything still works just fine.\n>\n\nDid the apply worker restarts during that time? If not you can try by\nchanging some subscription parameters which leads to its restart. This\nhas to happen before copy_table has finished. 
In the LOGS, you should\nsee the message: \"logical replication apply worker for subscription\n\"<subscription_name>\" will restart because of a parameter change\".\nIIUC, the code which doesn't allow to restart the apply worker after\nthe max_sync_workers_per_subscription is reached is as below:\nlogicalrep_worker_launch()\n{\n...\nif (nsyncworkers >= max_sync_workers_per_subscription)\n{\nLWLockRelease(LogicalRepWorkerLock);\nreturn;\n}\n...\n}\n\nThis happens before we allocate a worker to apply. So, it can happen\nonly during the restart of the apply worker because we always first\nthe apply worker, so in that case, it will never restart.\n\n> Looking a bit closer at the logs (from pogona and other), I doubt this\n> is about hitting the max_sync_workers_per_subscription limit. Notice we\n> start two sync workers, but neither of them ever completes. So we never\n> update the sync status or start syncing the remaining tables.\n>\n\nI think they are never completed because they are in a sort of\ninfinite loop. If you see process_syncing_tables_for_sync(), it will\nnever mark the status as SUBREL_STATE_SYNCDONE unless apply worker has\nset it to SUBREL_STATE_CATCHUP. In LogicalRepSyncTableStart(), we do\nwait for a state change to catchup via wait_for_worker_state_change(),\nbut we bail out in that function if the apply worker has died. After\nthat tablesync worker won't be able to complete because in our case\napply worker won't be able to restart.\n\n> So the question is why those two sync workers never complete - I guess\n> there's some sort of lock wait (deadlock?) 
or infinite loop.\n>\n\nIt would be a bit tricky to reproduce this even if the above theory is\ncorrect but I'll try it today or tomorrow.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Sun, 20 Mar 2022 08:41:40 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sun, Mar 20, 2022 at 8:41 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Fri, Mar 18, 2022 at 10:42 PM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>\n> > So the question is why those two sync workers never complete - I guess\n> > there's some sort of lock wait (deadlock?) or infinite loop.\n> >\n>\n> It would be a bit tricky to reproduce this even if the above theory is\n> correct but I'll try it today or tomorrow.\n>\n\nI am able to reproduce it with the help of a debugger. Firstly, I have\nadded the LOG message and some While (true) loops to debug sync and\napply workers. Test setup\n\nNode-1:\ncreate table t1(c1);\ncreate table t2(c1);\ninsert into t1 values(1);\ncreate publication pub1 for table t1;\ncreate publication pu2;\n\nNode-2:\nchange max_sync_workers_per_subscription to 1 in potgresql.conf\ncreate table t1(c1);\ncreate table t2(c1);\ncreate subscription sub1 connection 'dbname = postgres' publication pub1;\n\nTill this point, just allow debuggers in both workers just continue.\n\nNode-1:\nalter publication pub1 add table t2;\ninsert into t1 values(2);\n\nHere, we have to debug the apply worker such that when it tries to\napply the insert, stop the debugger in function apply_handle_insert()\nafter doing begin_replication_step().\n\nNode-2:\nalter subscription sub1 set pub1, pub2;\n\nNow, continue the debugger of apply worker, it should first start the\nsync worker and then exit because of parameter change. All of these\ndebugging steps are to just ensure the point that it should first\nstart the sync worker and then exit. 
After this point, table sync\nworker never finishes and log is filled with messages: \"reached\nmax_sync_workers_per_subscription limit\" (a newly added message by me\nin the attached debug patch).\n\nNow, it is not completely clear to me how exactly '013_partition.pl'\nleads to this situation but there is a possibility based on the LOGs\nit shows.\n\n-- \nWith Regards,\nAmit Kapila.", "msg_date": "Sun, 20 Mar 2022 11:53:41 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/20/22 07:23, Amit Kapila wrote:\n> On Sun, Mar 20, 2022 at 8:41 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>\n>> On Fri, Mar 18, 2022 at 10:42 PM Tomas Vondra\n>> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>>> So the question is why those two sync workers never complete - I guess\n>>> there's some sort of lock wait (deadlock?) or infinite loop.\n>>>\n>>\n>> It would be a bit tricky to reproduce this even if the above theory is\n>> correct but I'll try it today or tomorrow.\n>>\n> \n> I am able to reproduce it with the help of a debugger. Firstly, I have\n> added the LOG message and some While (true) loops to debug sync and\n> apply workers. 
Test setup\n> \n> Node-1:\n> create table t1(c1);\n> create table t2(c1);\n> insert into t1 values(1);\n> create publication pub1 for table t1;\n> create publication pu2;\n> \n> Node-2:\n> change max_sync_workers_per_subscription to 1 in potgresql.conf\n> create table t1(c1);\n> create table t2(c1);\n> create subscription sub1 connection 'dbname = postgres' publication pub1;\n> \n> Till this point, just allow debuggers in both workers just continue.\n> \n> Node-1:\n> alter publication pub1 add table t2;\n> insert into t1 values(2);\n> \n> Here, we have to debug the apply worker such that when it tries to\n> apply the insert, stop the debugger in function apply_handle_insert()\n> after doing begin_replication_step().\n> \n> Node-2:\n> alter subscription sub1 set pub1, pub2;\n> \n> Now, continue the debugger of apply worker, it should first start the\n> sync worker and then exit because of parameter change. All of these\n> debugging steps are to just ensure the point that it should first\n> start the sync worker and then exit. After this point, table sync\n> worker never finishes and log is filled with messages: \"reached\n> max_sync_workers_per_subscription limit\" (a newly added message by me\n> in the attached debug patch).\n> \n> Now, it is not completely clear to me how exactly '013_partition.pl'\n> leads to this situation but there is a possibility based on the LOGs\n> it shows.\n> \n\nThanks, I'll take a look later. From the description it seems this is an\nissue that existed before any of the patches, right? 
It might be more\nlikely to hit due to some test changes, but the root cause is older.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sun, 20 Mar 2022 12:23:39 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sun, Mar 20, 2022 at 4:53 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/20/22 07:23, Amit Kapila wrote:\n> > On Sun, Mar 20, 2022 at 8:41 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >>\n> >> On Fri, Mar 18, 2022 at 10:42 PM Tomas Vondra\n> >> <tomas.vondra@enterprisedb.com> wrote:\n> >>\n> >>> So the question is why those two sync workers never complete - I guess\n> >>> there's some sort of lock wait (deadlock?) or infinite loop.\n> >>>\n> >>\n> >> It would be a bit tricky to reproduce this even if the above theory is\n> >> correct but I'll try it today or tomorrow.\n> >>\n> >\n> > I am able to reproduce it with the help of a debugger. Firstly, I have\n> > added the LOG message and some While (true) loops to debug sync and\n> > apply workers. 
Test setup\n> >\n> > Node-1:\n> > create table t1(c1);\n> > create table t2(c1);\n> > insert into t1 values(1);\n> > create publication pub1 for table t1;\n> > create publication pu2;\n> >\n> > Node-2:\n> > change max_sync_workers_per_subscription to 1 in potgresql.conf\n> > create table t1(c1);\n> > create table t2(c1);\n> > create subscription sub1 connection 'dbname = postgres' publication pub1;\n> >\n> > Till this point, just allow debuggers in both workers just continue.\n> >\n> > Node-1:\n> > alter publication pub1 add table t2;\n> > insert into t1 values(2);\n> >\n> > Here, we have to debug the apply worker such that when it tries to\n> > apply the insert, stop the debugger in function apply_handle_insert()\n> > after doing begin_replication_step().\n> >\n> > Node-2:\n> > alter subscription sub1 set pub1, pub2;\n> >\n> > Now, continue the debugger of apply worker, it should first start the\n> > sync worker and then exit because of parameter change. All of these\n> > debugging steps are to just ensure the point that it should first\n> > start the sync worker and then exit. After this point, table sync\n> > worker never finishes and log is filled with messages: \"reached\n> > max_sync_workers_per_subscription limit\" (a newly added message by me\n> > in the attached debug patch).\n> >\n> > Now, it is not completely clear to me how exactly '013_partition.pl'\n> > leads to this situation but there is a possibility based on the LOGs\n> > it shows.\n> >\n>\n> Thanks, I'll take a look later. From the description it seems this is an\n> issue that existed before any of the patches, right? It might be more\n> likely to hit due to some test changes, but the root cause is older.\n>\n\nYes, your understanding is correct. 
If my understanding is correct,\nthen we need probably just need some changes in the new test to make\nit behave as per the current code.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 21 Mar 2022 16:08:59 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Mar 18, 2022 at 8:13 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> Ah, thanks for reminding me - it's hard to keep track of all the issues\n> in threads as long as this one.\n>\n> BTW do you have any opinion on the SET COLUMNS syntax? Peter Smith\n> proposed to get rid of it in [1] but I'm not sure that's a good idea.\n> Because if we ditch it, then removing the column list would look like this:\n>\n> ALTER PUBLICATION pub ALTER TABLE tab;\n>\n> And if we happen to add other per-table options, this would become\n> pretty ambiguous.\n>\n> Actually, do we even want to allow resetting column lists like this? We\n> don't allow this for row filters, so if you want to change a row filter\n> you have to re-add the table, right?\n>\n\nWe can use syntax like: \"alter publication pub1 set table t1 where (c2\n> 10);\" to reset the existing row filter. It seems similar thing works\nfor column list as well (\"alter publication pub1 set table t1 (c2)\nwhere (c2 > 10)\"). 
If I am not missing anything, I don't think we need\nadditional Alter Table syntax.\n\n> So maybe we should just ditch ALTER\n> TABLE entirely.\n>\n\nYeah, I agree especially if my above understanding is correct.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 21 Mar 2022 16:58:53 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sat, Mar 19, 2022 at 3:56 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/18/22 15:43, Tomas Vondra wrote:\n> >\n>\n> As for the issue reported by Shi-San about replica identity full and\n> column filters, presumably you're referring to this:\n>\n> create table tbl (a int, b int, c int);\n> create publication pub for table tbl (a, b, c);\n> alter table tbl replica identity full;\n>\n> postgres=# delete from tbl;\n> ERROR: cannot delete from table \"tbl\"\n> DETAIL: Column list used by the publication does not cover the\n> replica identity.\n>\n> I believe not allowing column lists with REPLICA IDENTITY FULL is\n> expected / correct behavior. I mean, for that to work the column list\n> has to always include all columns anyway, so it's pretty pointless. Of\n> course, we might check that the column list contains everything, but\n> considering the list does always have to contain all columns, and it\n> break as soon as you add any columns, it seems reasonable (cheaper) to\n> just require no column lists.\n>\n\nFair point. We can leave this as it is.\n\n> I also went through the patch and made the naming more consistent. The\n> comments used both \"column filter\" and \"column list\" randomly, and I\n> think the agreement is to use \"list\" so I adopted that wording.\n>\n>\n> However, while looking at how pgoutput, I realized one thing - for row\n> filters we track them \"per operation\", depending on which operations are\n> defined for a given publication. 
Shouldn't we do the same thing for\n> column lists, really?\n>\n> I mean, if there are two publications with different column lists, one\n> for inserts and the other one for updates, isn't it wrong to merge these\n> two column lists?\n>\n\nThe reason we can't combine row filters for inserts with\nupdates/deletes is that if inserts have some column that is not\npresent in RI then during update filtering (for old tuple) it will\ngive an error as the column won't be present in WAL log.\n\nOTOH, the same problem won't be there for the column list/filter patch\nbecause all the RI columns are there in the column list (for\nupdate/delete) and we don't need to apply a column filter for old\ntuples in either update or delete.\n\nBasically, the filter rules are slightly different for row filters and\ncolumn lists, so we need them (combine of filters) for one but not for\nthe other. Now, for the sake of consistency with row filters, we can\ndo it but as such there won't be any problem or maybe we can just add\na comment for the same in code.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 21 Mar 2022 17:25:28 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hello,\n\nPlease add me to the list of authors of this patch. I made a large\nnumber of nontrivial changes to it early on. Thanks. 
I have modified\nthe entry in the CF app (which sorts alphabetically, it was not my\nintention to put my name first.)\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\n\n", "msg_date": "Mon, 21 Mar 2022 13:16:37 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sat, Mar 19, 2022 at 11:11 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/19/22 18:11, Tomas Vondra wrote:\n> > Fix a compiler warning reported by cfbot.\n>\n> Apologies, I failed to actually commit the fix. So here we go again.\n>\n\nFew comments:\n===============\n1.\n+/*\n+ * Gets a list of OIDs of all partial-column publications of the given\n+ * relation, that is, those that specify a column list.\n+ */\n+List *\n+GetRelationColumnPartialPublications(Oid relid)\n{\n...\n}\n\n...\n+/*\n+ * For a relation in a publication that is known to have a non-null column\n+ * list, return the list of attribute numbers that are in it.\n+ */\n+List *\n+GetRelationColumnListInPublication(Oid relid, Oid pubid)\n{\n...\n}\n\nBoth these functions are not required now. So, we can remove them.\n\n2.\n@@ -464,11 +478,11 @@ logicalrep_write_update(StringInfo out,\nTransactionId xid, Relation rel,\n pq_sendbyte(out, 'O'); /* old tuple follows */\n else\n pq_sendbyte(out, 'K'); /* old key follows */\n- logicalrep_write_tuple(out, rel, oldslot, binary);\n+ logicalrep_write_tuple(out, rel, oldslot, binary, columns);\n }\n\nAs mentioned previously, here, we should pass NULL similar to\nlogicalrep_write_delete as we don't need to use column list for old\ntuples.\n\n3.\n+ * XXX The name is a bit misleading, because we don't really transform\n+ * anything here - we merely check the column list is compatible with the\n+ * definition of the publication (with publish_via_partition_root=false)\n+ * we only allow column lists on the leaf relations. 
So maybe rename it?\n+ */\n+static void\n+TransformPubColumnList(List *tables, const char *queryString,\n+ bool pubviaroot)\n\nThe second parameter is not used in this function. As noted in the\ncomments, I also think it is better to rename this. How about\nValidatePubColumnList?\n\n4.\n@@ -821,6 +942,9 @@ fetch_remote_table_info(char *nspname, char *relname,\n *\n * 3) one of the subscribed publications is declared as ALL TABLES IN\n * SCHEMA that includes this relation\n+ *\n+ * XXX Does this actually handle puballtables and schema publications\n+ * correctly?\n */\n if (walrcv_server_version(LogRepWorkerWalRcvConn) >= 150000)\n\nWhy is this comment added in the row filter code? Now, both row filter\nand column list are fetched in the same way, so not sure what exactly\nthis comment is referring to.\n\n5.\n+/* qsort comparator for attnums */\n+static int\n+compare_int16(const void *a, const void *b)\n+{\n+ int av = *(const int16 *) a;\n+ int bv = *(const int16 *) b;\n+\n+ /* this can't overflow if int is wider than int16 */\n+ return (av - bv);\n+}\n\nThe exact same code exists in statscmds.c. Do we need a second copy of the same?\n\n6.\n static void pgoutput_row_filter_init(PGOutputData *data,\n List *publications,\n RelationSyncEntry *entry);\n+\n static bool pgoutput_row_filter_exec_expr(ExprState *state,\n\nSpurious line addition.\n\n7. The tests in 030_column_list.pl take a long time as compared to all\nother similar individual tests in the subscription folder. 
I haven't\nchecked whether there is any need to reduce some tests but it seems\nworth checking.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 21 Mar 2022 19:42:04 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Mar-19, Tomas Vondra wrote:\n\n> @@ -174,7 +182,13 @@ ALTER PUBLICATION noinsert SET (publish = 'update, delete');\n> <para>\n> Add some tables to the publication:\n> <programlisting>\n> -ALTER PUBLICATION mypublication ADD TABLE users, departments;\n> +ALTER PUBLICATION mypublication ADD TABLE users (user_id, firstname), departments;\n> +</programlisting></para>\n> +\n> + <para>\n> + Change the set of columns published for a table:\n> +<programlisting>\n> +ALTER PUBLICATION mypublication SET TABLE users (user_id, firstname, lastname), TABLE departments;\n> </programlisting></para>\n> \n> <para>\n\nHmm, it seems to me that if you've removed the feature to change the set\nof columns published for a table, then the second example should be\nremoved as well.\n\n> +/*\n> + * Transform the publication column lists expression for all the relations\n> + * in the list.\n> + *\n> + * XXX The name is a bit misleading, because we don't really transform\n> + * anything here - we merely check the column list is compatible with the\n> + * definition of the publication (with publish_via_partition_root=false)\n> + * we only allow column lists on the leaf relations. So maybe rename it?\n> + */\n> +static void\n> +TransformPubColumnList(List *tables, const char *queryString,\n> +\t\t\t\t\t bool pubviaroot)\n> +{\n\nI agree with renaming this function. 
Maybe CheckPubRelationColumnList() ?\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"This is a foot just waiting to be shot\" (Andrew Dunstan)\n\n\n", "msg_date": "Tue, 22 Mar 2022 20:24:57 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Mar 23, 2022 at 12:54 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2022-Mar-19, Tomas Vondra wrote:\n>\n> > @@ -174,7 +182,13 @@ ALTER PUBLICATION noinsert SET (publish = 'update, delete');\n> > <para>\n> > Add some tables to the publication:\n> > <programlisting>\n> > -ALTER PUBLICATION mypublication ADD TABLE users, departments;\n> > +ALTER PUBLICATION mypublication ADD TABLE users (user_id, firstname), departments;\n> > +</programlisting></para>\n> > +\n> > + <para>\n> > + Change the set of columns published for a table:\n> > +<programlisting>\n> > +ALTER PUBLICATION mypublication SET TABLE users (user_id, firstname, lastname), TABLE departments;\n> > </programlisting></para>\n> >\n> > <para>\n>\n> Hmm, it seems to me that if you've removed the feature to change the set\n> of columns published for a table, then the second example should be\n> removed as well.\n>\n\nAs per my understanding, the removed feature is \"Alter Publication ...\nAlter Table ...\". The example here \"Alter Publication ... 
Set Table\n..\" should still work as mentioned in my email[1].\n\n[1] - https://www.postgresql.org/message-id/CAA4eK1L6YTcx%3DyJfdudr-y98Wcn4rWX4puHGAa2nxSCRb3fzQw%40mail.gmail.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 23 Mar 2022 07:47:21 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 2022-Mar-23, Amit Kapila wrote:\n\n> On Wed, Mar 23, 2022 at 12:54 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> >\n> > On 2022-Mar-19, Tomas Vondra wrote:\n> >\n> > > @@ -174,7 +182,13 @@ ALTER PUBLICATION noinsert SET (publish = 'update, delete');\n> > > <para>\n> > > Add some tables to the publication:\n> > > <programlisting>\n> > > -ALTER PUBLICATION mypublication ADD TABLE users, departments;\n> > > +ALTER PUBLICATION mypublication ADD TABLE users (user_id, firstname), departments;\n> > > +</programlisting></para>\n> > > +\n> > > + <para>\n> > > + Change the set of columns published for a table:\n> > > +<programlisting>\n> > > +ALTER PUBLICATION mypublication SET TABLE users (user_id, firstname, lastname), TABLE departments;\n> > > </programlisting></para>\n> > >\n> > > <para>\n> >\n> > Hmm, it seems to me that if you've removed the feature to change the set\n> > of columns published for a table, then the second example should be\n> > removed as well.\n> \n> As per my understanding, the removed feature is \"Alter Publication ...\n> Alter Table ...\". The example here \"Alter Publication ... Set Table\n> ..\" should still work as mentioned in my email[1].\n\nAh, I see. Yeah, that makes sense. In that case, the leading text\nseems a bit confusing. I would suggest \"Change the set of tables in the\npublication, specifying a different set of columns for one of them:\"\n\nI think it would make the example more useful if we table for which the\ncolumns are changing is a different one. 
Maybe do this:\n\n Add some tables to the publication:\n <programlisting>\n-ALTER PUBLICATION mypublication ADD TABLE users, departments;\n+ALTER PUBLICATION mypublication ADD TABLE users (user_id, firstname), departments;\n+</programlisting></para>\n+\n+ <para>\n+ Change the set of tables in the publication, keeping the column list\n+ in the users table and specifying a different column list for the\n+ departments table. Note that previously published tables not mentioned\n+ in this command are removed from the publication:\n+\n+<programlisting>\n+ALTER PUBLICATION mypublication SET TABLE users (user_id, firstname), TABLE departments (dept_id, deptname);\n </programlisting></para>\n\nso that it is clear that if you want to keep the column list unchanged\nin one table, you are forced to specify it again.\n\n(Frankly, this ALTER PUBLICATION SET command seems pretty useless from a\nuser PoV.)\n\n-- \nÁlvaro Herrera PostgreSQL Developer — https://www.EnterpriseDB.com/\n\"Investigación es lo que hago cuando no sé lo que estoy haciendo\"\n(Wernher von Braun)\n\n\n", "msg_date": "Wed, 23 Mar 2022 11:21:23 +0100", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/21/22 12:55, Amit Kapila wrote:\n> On Sat, Mar 19, 2022 at 3:56 AM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> ...\n>>\n>> However, while looking at how pgoutput, I realized one thing - for row\n>> filters we track them \"per operation\", depending on which operations are\n>> defined for a given publication. 
Shouldn't we do the same thing for\n>> column lists, really?\n>>\n>> I mean, if there are two publications with different column lists, one\n>> for inserts and the other one for updates, isn't it wrong to merge these\n>> two column lists?\n>>\n> \n> The reason we can't combine row filters for inserts with\n> updates/deletes is that if inserts have some column that is not\n> present in RI then during update filtering (for old tuple) it will\n> give an error as the column won't be present in WAL log.\n> \n> OTOH, the same problem won't be there for the column list/filter patch\n> because all the RI columns are there in the column list (for\n> update/delete) and we don't need to apply a column filter for old\n> tuples in either update or delete.\n> \n> Basically, the filter rules are slightly different for row filters and\n> column lists, so we need them (combine of filters) for one but not for\n> the other. Now, for the sake of consistency with row filters, we can\n> do it but as such there won't be any problem or maybe we can just add\n> a comment for the same in code.\n> \n\nOK, thanks for the explanation. I'll add a comment explaining this to\nthe function initializing the column filter.\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Wed, 23 Mar 2022 23:37:54 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/21/22 12:28, Amit Kapila wrote:\n> On Fri, Mar 18, 2022 at 8:13 PM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> Ah, thanks for reminding me - it's hard to keep track of all the issues\n>> in threads as long as this one.\n>>\n>> BTW do you have any opinion on the SET COLUMNS syntax? 
Peter Smith\n>> proposed to get rid of it in [1] but I'm not sure that's a good idea.\n>> Because if we ditch it, then removing the column list would look like this:\n>>\n>> ALTER PUBLICATION pub ALTER TABLE tab;\n>>\n>> And if we happen to add other per-table options, this would become\n>> pretty ambiguous.\n>>\n>> Actually, do we even want to allow resetting column lists like this? We\n>> don't allow this for row filters, so if you want to change a row filter\n>> you have to re-add the table, right?\n>>\n> \n> We can use syntax like: \"alter publication pub1 set table t1 where (c2\n>> 10);\" to reset the existing row filter. It seems similar thing works\n> for column list as well (\"alter publication pub1 set table t1 (c2)\n> where (c2 > 10)\"). If I am not missing anything, I don't think we need\n> additional Alter Table syntax.\n> \n>> So maybe we should just ditch ALTER\n>> TABLE entirely.\n>>\n> \n> Yeah, I agree especially if my above understanding is correct.\n> \n\nI think there's a gotcha that\n\n ALTER PUBLICATION pub SET TABLE t ...\n\nalso removes all other relations from the publication, and it removes\nand re-adds the table anyway. 
So I'm not sure what's the advantage?\n\nAnyway, I don't see why we would need such ALTER TABLE only for column\nfilters and not for row filters - either we need to allow this for both\noptions or none of them.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Wed, 23 Mar 2022 23:41:48 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Mar 24, 2022 at 4:11 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/21/22 12:28, Amit Kapila wrote:\n> > On Fri, Mar 18, 2022 at 8:13 PM Tomas Vondra\n> > <tomas.vondra@enterprisedb.com> wrote:\n> >>\n> >> Ah, thanks for reminding me - it's hard to keep track of all the issues\n> >> in threads as long as this one.\n> >>\n> >> BTW do you have any opinion on the SET COLUMNS syntax? Peter Smith\n> >> proposed to get rid of it in [1] but I'm not sure that's a good idea.\n> >> Because if we ditch it, then removing the column list would look like this:\n> >>\n> >> ALTER PUBLICATION pub ALTER TABLE tab;\n> >>\n> >> And if we happen to add other per-table options, this would become\n> >> pretty ambiguous.\n> >>\n> >> Actually, do we even want to allow resetting column lists like this? We\n> >> don't allow this for row filters, so if you want to change a row filter\n> >> you have to re-add the table, right?\n> >>\n> >\n> > We can use syntax like: \"alter publication pub1 set table t1 where (c2\n> >> 10);\" to reset the existing row filter. It seems similar thing works\n> > for column list as well (\"alter publication pub1 set table t1 (c2)\n> > where (c2 > 10)\"). 
If I am not missing anything, I don't think we need\n> > additional Alter Table syntax.\n> >\n> >> So maybe we should just ditch ALTER\n> >> TABLE entirely.\n> >>\n> >\n> > Yeah, I agree especially if my above understanding is correct.\n> >\n>\n> I think there's a gotcha that\n>\n> ALTER PUBLICATION pub SET TABLE t ...\n>\n> also removes all other relations from the publication, and it removes\n> and re-adds the table anyway. So I'm not sure what's the advantage?\n>\n\nI think it could be used when the user has fewer tables and she wants\nto change the list of published tables or their row/column filters. I\nam not sure of the value of this to users but this was a pre-existing\nsyntax.\n\n> Anyway, I don't see why we would need such ALTER TABLE only for column\n> filters and not for row filters - either we need to allow this for both\n> options or none of them.\n>\n\n+1. I think for now we can leave this new ALTER TABLE syntax and do it\nfor both column and row filters together.\n\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 24 Mar 2022 08:28:25 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 17.03.22 20:11, Tomas Vondra wrote:\n> But the comment describes the error for the whole block, which looks\n> like this:\n> \n> -- error: replica identity \"a\" not included in the column list\n> ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c);\n> UPDATE testpub_tbl5 SET a = 1;\n> ERROR: cannot update table \"testpub_tbl5\"\n> DETAIL: Column list used by the publication does not cover the replica\n> identity.\n> \n> So IMHO the comment is correct.\n\nOk, that makes sense. I read all the comments in the test file again. \nThere were a couple that I think could use tweaking; see attached file. 
\nThe ones with \"???\" didn't make sense to me: The first one is before a \ncommand that doesn't seem to change anything, the second one I didn't \nunderstand the meaning. Please take a look.\n\n(The patch is actually based on your 20220318c patch, but I'm adding it \nhere since we have the discussion here.)", "msg_date": "Thu, 24 Mar 2022 17:33:56 +0100", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/24/22 17:33, Peter Eisentraut wrote:\n> \n> On 17.03.22 20:11, Tomas Vondra wrote:\n>> But the comment describes the error for the whole block, which looks\n>> like this:\n>>\n>> -- error: replica identity \"a\" not included in the column list\n>> ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl5 (b, c);\n>> UPDATE testpub_tbl5 SET a = 1;\n>> ERROR:  cannot update table \"testpub_tbl5\"\n>> DETAIL:  Column list used by the publication does not cover the replica\n>> identity.\n>>\n>> So IMHO the comment is correct.\n> \n> Ok, that makes sense.  I read all the comments in the test file again.\n> There were a couple that I think could use tweaking; see attached file.\n> The ones with \"???\" didn't make sense to me:  The first one is before a\n> command that doesn't seem to change anything, the second one I didn't\n> understand the meaning.  Please take a look.\n> \n\nThanks, the proposed changes seem reasonable. 
As for the two unclear\ntests/comments:\n\n -- make sure changing the column list is updated in SET TABLE (???)\n CREATE TABLE testpub_tbl7 (a int primary key, b text, c text);\n ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl7 (a, b);\n \\d+ testpub_tbl7\n\n -- ok: we'll skip this table (???)\n ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, b);\n \\d+ testpub_tbl7\n\n -- ok: update the column list\n ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, c);\n \\d+ testpub_tbl7\n\nThe goal of this test is to verify that we handle column lists correctly\nin SET TABLE. That is, if the column list matches the currently set one,\nwe should just skip the table in SET TABLE. If it's different, we need\nto update the catalog. That's what the first comment is trying to say.\n\nIt's true we can't really check we skip the table in the SetObject code,\nbut we can at least ensure there's no error and the column list remains\nthe same.\n\nAnd we're not replicating any data in regression tests, so it might\nhappen we discard the new column list, for example. 
Hence the second\ntest, which ensures we end up with the modified column list.\n\nAttached is a patch, rebased on top of the sequence decoding stuff I\npushed earlier today, also including the comments rewording, and\nrenaming the \"transform\" function.\n\nI'll go over it again and get it pushed soon, unless someone objects.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company", "msg_date": "Fri, 25 Mar 2022 01:14:48 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Mar 25, 2022 at 5:44 AM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> Attached is a patch, rebased on top of the sequence decoding stuff I\n> pushed earlier today, also including the comments rewording, and\n> renaming the \"transform\" function.\n>\n> I'll go over it again and get it pushed soon, unless someone objects.\n>\n\nYou haven't addressed the comments given by me earlier this week. See\nhttps://www.postgresql.org/message-id/CAA4eK1LY_JGL7LvdT64ujEiEAVaADuhdej1QNnwxvO_-KPzeEg%40mail.gmail.com.\n\n*\n+ * XXX The name is a bit misleading, because we don't really transform\n+ * anything here - we merely check the column list is compatible with the\n+ * definition of the publication (with publish_via_partition_root=false)\n+ * we only allow column lists on the leaf relations. 
So maybe rename it?\n+ */\n+static void\n+CheckPubRelationColumnList(List *tables, const char *queryString,\n+ bool pubviaroot)\n\nAfter changing this function name, the comment above is not required.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 25 Mar 2022 08:40:04 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/25/22 04:10, Amit Kapila wrote:\n> On Fri, Mar 25, 2022 at 5:44 AM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> Attached is a patch, rebased on top of the sequence decoding stuff I\n>> pushed earlier today, also including the comments rewording, and\n>> renaming the \"transform\" function.\n>>\n>> I'll go over it again and get it pushed soon, unless someone objects.\n>>\n> \n> You haven't addressed the comments given by me earlier this week. See\n> https://www.postgresql.org/message-id/CAA4eK1LY_JGL7LvdT64ujEiEAVaADuhdej1QNnwxvO_-KPzeEg%40mail.gmail.com.\n> \n\nThanks for noticing that! Thunderbird did not include that message into\nthe patch thread for some reason, so I did not notice that!\n\n> *\n> + * XXX The name is a bit misleading, because we don't really transform\n> + * anything here - we merely check the column list is compatible with the\n> + * definition of the publication (with publish_via_partition_root=false)\n> + * we only allow column lists on the leaf relations. So maybe rename it?\n> + */\n> +static void\n> +CheckPubRelationColumnList(List *tables, const char *queryString,\n> + bool pubviaroot)\n> \n> After changing this function name, the comment above is not required.\n> \n\nThanks, comment updated.\n\nI went over the patch again, polished the commit message a bit, and\npushed. 
May the buildfarm be merciful!\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 26 Mar 2022 01:18:08 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/21/22 15:12, Amit Kapila wrote:\n> On Sat, Mar 19, 2022 at 11:11 PM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> On 3/19/22 18:11, Tomas Vondra wrote:\n>>> Fix a compiler warning reported by cfbot.\n>>\n>> Apologies, I failed to actually commit the fix. So here we go again.\n>>\n> \n> Few comments:\n> ===============\n> 1.\n> +/*\n> + * Gets a list of OIDs of all partial-column publications of the given\n> + * relation, that is, those that specify a column list.\n> + */\n> +List *\n> +GetRelationColumnPartialPublications(Oid relid)\n> {\n> ...\n> }\n> \n> ...\n> +/*\n> + * For a relation in a publication that is known to have a non-null column\n> + * list, return the list of attribute numbers that are in it.\n> + */\n> +List *\n> +GetRelationColumnListInPublication(Oid relid, Oid pubid)\n> {\n> ...\n> }\n> \n> Both these functions are not required now. 
So, we can remove them.\n> \n\nGood catch, removed.\n\n> 2.\n> @@ -464,11 +478,11 @@ logicalrep_write_update(StringInfo out,\n> TransactionId xid, Relation rel,\n> pq_sendbyte(out, 'O'); /* old tuple follows */\n> else\n> pq_sendbyte(out, 'K'); /* old key follows */\n> - logicalrep_write_tuple(out, rel, oldslot, binary);\n> + logicalrep_write_tuple(out, rel, oldslot, binary, columns);\n> }\n> \n> As mentioned previously, here, we should pass NULL similar to\n> logicalrep_write_delete as we don't need to use column list for old\n> tuples.\n> \n\nFixed.\n\n> 3.\n> + * XXX The name is a bit misleading, because we don't really transform\n> + * anything here - we merely check the column list is compatible with the\n> + * definition of the publication (with publish_via_partition_root=false)\n> + * we only allow column lists on the leaf relations. So maybe rename it?\n> + */\n> +static void\n> +TransformPubColumnList(List *tables, const char *queryString,\n> + bool pubviaroot)\n> \n> The second parameter is not used in this function. As noted in the\n> comments, I also think it is better to rename this. How about\n> ValidatePubColumnList?\n> \n> 4.\n> @@ -821,6 +942,9 @@ fetch_remote_table_info(char *nspname, char *relname,\n> *\n> * 3) one of the subscribed publications is declared as ALL TABLES IN\n> * SCHEMA that includes this relation\n> + *\n> + * XXX Does this actually handle puballtables and schema publications\n> + * correctly?\n> */\n> if (walrcv_server_version(LogRepWorkerWalRcvConn) >= 150000)\n> \n> Why is this comment added in the row filter code? 
Now, both row filter\n> and column list are fetched in the same way, so not sure what exactly\n> this comment is referring to.\n> \n\nI added that comment as a note to myself while learning about how the\ncode works, forgot to remove that.\n\n> 5.\n> +/* qsort comparator for attnums */\n> +static int\n> +compare_int16(const void *a, const void *b)\n> +{\n> + int av = *(const int16 *) a;\n> + int bv = *(const int16 *) b;\n> +\n> + /* this can't overflow if int is wider than int16 */\n> + return (av - bv);\n> +}\n> \n> The exact same code exists in statscmds.c. Do we need a second copy of the same?\n> \n\nYeah, I thought about moving it to some common header, but I think it's\nnot really worth it at this point.\n\n> 6.\n> static void pgoutput_row_filter_init(PGOutputData *data,\n> List *publications,\n> RelationSyncEntry *entry);\n> +\n> static bool pgoutput_row_filter_exec_expr(ExprState *state,\n> \n> Spurious line addition.\n> \n\nFixed.\n\n> 7. The tests in 030_column_list.pl take a long time as compared to all\n> other similar individual tests in the subscription folder. I haven't\n> checked whether there is any need to reduce some tests but it seems\n> worth checking.\n> \n\nOn my machine, 'make check' in src/test/subscription takes ~150 seconds\n(with asserts and -O0), and the new script takes ~14 seconds, while most\nother tests have 3-6 seconds.\n\nAFAICS that's simply due to the number of tests in the script, and I\ndon't think there are any unnecessary ones. I was actually adding them\nin response to issues reported during development, or to test various\nimportant cases. 
So I don't think we can remove some of them easily :-(\n\nAnd it's not like the tests are using massive amounts of data either.\n\nWe could split the test, but that obviously won't reduce the duration,\nof course.\n\nSo I decided to keep the test as is, for now, and maybe we can try\nreducing the test after a couple buildfarm runs.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 26 Mar 2022 01:29:35 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/26/22 01:18, Tomas Vondra wrote:\n>\n> ...\n> \n> I went over the patch again, polished the commit message a bit, and\n> pushed. May the buildfarm be merciful!\n> \n\nThere's a couple failures immediately after the push, which caused me a\nminor heart attack. But it seems all of those are strange failures\nrelated to configure (which the patch did not touch at all), on animals\nmanaged by Andres. And a couple animals succeeded since then.\n\nSo I guess the animals were reconfigured, or something ...\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 26 Mar 2022 01:35:23 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hello, \r\n\r\nThe 'prattrs' column has been added to the pg_publication_rel catalog, \r\nbut the current commit to catalog.sgml seems to have added it to pg_publication_namespace. 
\r\nThe attached patch fixes this.\r\n\r\nRegards,\r\nNoriyoshi Shinoda\r\n-----Original Message-----\r\nFrom: Tomas Vondra <tomas.vondra@enterprisedb.com> \r\nSent: Saturday, March 26, 2022 9:35 AM\r\nTo: Amit Kapila <amit.kapila16@gmail.com>\r\nCc: Peter Eisentraut <peter.eisentraut@enterprisedb.com>; houzj.fnst@fujitsu.com; Alvaro Herrera <alvherre@alvh.no-ip.org>; Justin Pryzby <pryzby@telsasoft.com>; Rahila Syed <rahilasyed90@gmail.com>; Peter Smith <smithpb2250@gmail.com>; pgsql-hackers <pgsql-hackers@postgresql.org>; shiy.fnst@fujitsu.com\r\nSubject: Re: Column Filtering in Logical Replication\r\n\r\nOn 3/26/22 01:18, Tomas Vondra wrote:\r\n>\r\n> ...\r\n> \r\n> I went over the patch again, polished the commit message a bit, and \r\n> pushed. May the buildfarm be merciful!\r\n> \r\n\r\nThere's a couple failures immediately after the push, which caused me a minor heart attack. But it seems all of those are strange failures related to configure (which the patch did not touch at all), on animals managed by Andres. And a couple animals succeeded since then.\r\n\r\nSo I guess the animals were reconfigured, or something ...\r\n\r\n\r\nregards\r\n\r\n--\r\nTomas Vondra\r\nEnterpriseDB: http://www.enterprisedb.com\r\nThe Enterprise PostgreSQL Company", "msg_date": "Sat, 26 Mar 2022 04:09:17 +0000", "msg_from": "\"Shinoda, Noriyoshi (PN Japan FSIP)\" <noriyoshi.shinoda@hpe.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/26/22 05:09, Shinoda, Noriyoshi (PN Japan FSIP) wrote:\n> Hello, \n> \n> The 'prattrs' column has been added to the pg_publication_rel catalog, \n> but the current commit to catalog.sgml seems to have added it to pg_publication_namespace. 
\n> The attached patch fixes this.\n> \n\nThanks, I'll get this pushed.\n\nSadly, while looking at the catalog docs I realized I forgot to bump the\ncatversion :-(\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 26 Mar 2022 10:58:05 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/26/22 10:58, Tomas Vondra wrote:\n> On 3/26/22 05:09, Shinoda, Noriyoshi (PN Japan FSIP) wrote:\n>> Hello, \n>>\n>> The 'prattrs' column has been added to the pg_publication_rel catalog, \n>> but the current commit to catalog.sgml seems to have added it to pg_publication_namespace. \n>> The attached patch fixes this.\n>>\n> \n> Thanks, I'll get this pushed.\n> \n\nPushed. Thanks for noticing this!\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 26 Mar 2022 19:15:37 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Tomas Vondra <tomas.vondra@enterprisedb.com> writes:\n> I went over the patch again, polished the commit message a bit, and\n> pushed. May the buildfarm be merciful!\n\nInitial results aren't that great. komodoensis[1], petalura[2],\nand snapper[3] have all shown variants of\n\n# Failed test 'partitions with different replica identities not replicated correctly'\n# at t/031_column_list.pl line 734.\n# got: '2|4|\n# 4|9|'\n# expected: '1||5\n# 2|4|\n# 3||8\n# 4|9|'\n# Looks like you failed 1 test of 34.\n[18:19:36] t/031_column_list.pl ............... 
\nDubious, test returned 1 (wstat 256, 0x100)\nFailed 1/34 subtests \n\nsnapper reported different actual output than the other two:\n# got: '1||5\n# 3||8'\n\nThe failure seems intermittent, as both komodoensis and petalura\nhave also passed cleanly since the commit (snapper's only run once).\n\nThis smells like an uninitialized-variable problem, but I've had\nno luck finding any problem under valgrind. Not sure how to progress\nfrom here.\n\n\t\t\tregards, tom lane\n\n[1] https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=komodoensis&dt=2022-03-26%2015%3A54%3A04\n[2] https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=petalura&dt=2022-03-26%2004%3A20%3A04\n[3] https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=snapper&dt=2022-03-26%2018%3A46%3A28\n\n\n", "msg_date": "Sat, 26 Mar 2022 17:37:14 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/26/22 22:37, Tom Lane wrote:\n> Tomas Vondra <tomas.vondra@enterprisedb.com> writes:\n>> I went over the patch again, polished the commit message a bit, and\n>> pushed. May the buildfarm be merciful!\n> \n> Initial results aren't that great. komodoensis[1], petalura[2],\n> and snapper[3] have all shown variants of\n> \n> # Failed test 'partitions with different replica identities not replicated correctly'\n> # at t/031_column_list.pl line 734.\n> # got: '2|4|\n> # 4|9|'\n> # expected: '1||5\n> # 2|4|\n> # 3||8\n> # 4|9|'\n> # Looks like you failed 1 test of 34.\n> [18:19:36] t/031_column_list.pl ............... 
\n> Dubious, test returned 1 (wstat 256, 0x100)\n> Failed 1/34 subtests \n> \n> snapper reported different actual output than the other two:\n> # got: '1||5\n> # 3||8'\n> \n> The failure seems intermittent, as both komodoensis and petalura\n> have also passed cleanly since the commit (snapper's only run once).\n> \n> This smells like an uninitialized-variable problem, but I've had\n> no luck finding any problem under valgrind. Not sure how to progress\n> from here.\n> \n\nI think I see the problem - there's a CREATE SUBSCRIPTION but the test\nis not waiting for the tablesync to complete, so sometimes it finishes\nin time and sometimes not. That'd explain the flaky behavior, and it's\njust this one test that misses the sync AFAICS.\n\nFWIW I did run this under valgrind a number of times, and also on\nvarious ARM machines that tend to trip over memory issues.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 26 Mar 2022 22:52:53 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Tomas Vondra <tomas.vondra@enterprisedb.com> writes:\n> On 3/26/22 22:37, Tom Lane wrote:\n>> This smells like an uninitialized-variable problem, but I've had\n>> no luck finding any problem under valgrind. Not sure how to progress\n>> from here.\n\n> I think I see the problem - there's a CREATE SUBSCRIPTION but the test\n> is not waiting for the tablesync to complete, so sometimes it finishes\n> in time and sometimes not. 
That'd explain the flaky behavior, and it's\n> just this one test that misses the sync AFAICS.\n\nAh, that would also fit the symptoms.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sat, 26 Mar 2022 17:55:55 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/26/22 22:55, Tom Lane wrote:\n> Tomas Vondra <tomas.vondra@enterprisedb.com> writes:\n>> On 3/26/22 22:37, Tom Lane wrote:\n>>> This smells like an uninitialized-variable problem, but I've had\n>>> no luck finding any problem under valgrind. Not sure how to progress\n>>> from here.\n> \n>> I think I see the problem - there's a CREATE SUBSCRIPTION but the test\n>> is not waiting for the tablesync to complete, so sometimes it finishes\n>> in time and sometimes not. That'd explain the flaky behavior, and it's\n>> just this one test that misses the sync AFAICS.\n> \n> Ah, that would also fit the symptoms.\n> \n\nI'll go over the test to check if some other test misses that, and\nperhaps do a bit of testing, and then push a fix.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sat, 26 Mar 2022 22:58:01 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 3/26/22 22:58, Tomas Vondra wrote:\n> On 3/26/22 22:55, Tom Lane wrote:\n>> Tomas Vondra <tomas.vondra@enterprisedb.com> writes:\n>>> On 3/26/22 22:37, Tom Lane wrote:\n>>>> This smells like an uninitialized-variable problem, but I've had\n>>>> no luck finding any problem under valgrind. Not sure how to progress\n>>>> from here.\n>>\n>>> I think I see the problem - there's a CREATE SUBSCRIPTION but the test\n>>> is not waiting for the tablesync to complete, so sometimes it finishes\n>>> in time and sometimes not. 
That'd explain the flaky behavior, and it's\n>>> just this one test that misses the sync AFAICS.\n>>\n>> Ah, that would also fit the symptoms.\n>>\n> \n> I'll go over the test to check if some other test misses that, and\n> perhaps do a bit of testing, and then push a fix.\n> \n\nPushed. I checked the other tests in 031_column_list.pl and I AFAICS all\nof them are waiting for the sync correctly.\n\n\n[rolls eyes] I just noticed I listed the file as .sql in the commit\nmessage. Not great.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Sun, 27 Mar 2022 00:39:52 +0100", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sun, Mar 20, 2022 at 4:53 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/20/22 07:23, Amit Kapila wrote:\n> > On Sun, Mar 20, 2022 at 8:41 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >>\n> >> On Fri, Mar 18, 2022 at 10:42 PM Tomas Vondra\n> >> <tomas.vondra@enterprisedb.com> wrote:\n> >>\n> >>> So the question is why those two sync workers never complete - I guess\n> >>> there's some sort of lock wait (deadlock?) or infinite loop.\n> >>>\n> >>\n> >> It would be a bit tricky to reproduce this even if the above theory is\n> >> correct but I'll try it today or tomorrow.\n> >>\n> >\n> > I am able to reproduce it with the help of a debugger. Firstly, I have\n> > added the LOG message and some While (true) loops to debug sync and\n> > apply workers. 
Test setup\n> >\n> > Node-1:\n> > create table t1(c1);\n> > create table t2(c1);\n> > insert into t1 values(1);\n> > create publication pub1 for table t1;\n> > create publication pu2;\n> >\n> > Node-2:\n> > change max_sync_workers_per_subscription to 1 in potgresql.conf\n> > create table t1(c1);\n> > create table t2(c1);\n> > create subscription sub1 connection 'dbname = postgres' publication pub1;\n> >\n> > Till this point, just allow debuggers in both workers just continue.\n> >\n> > Node-1:\n> > alter publication pub1 add table t2;\n> > insert into t1 values(2);\n> >\n> > Here, we have to debug the apply worker such that when it tries to\n> > apply the insert, stop the debugger in function apply_handle_insert()\n> > after doing begin_replication_step().\n> >\n> > Node-2:\n> > alter subscription sub1 set pub1, pub2;\n> >\n> > Now, continue the debugger of apply worker, it should first start the\n> > sync worker and then exit because of parameter change. All of these\n> > debugging steps are to just ensure the point that it should first\n> > start the sync worker and then exit. 
After this point, table sync\n> > worker never finishes and log is filled with messages: \"reached\n> > max_sync_workers_per_subscription limit\" (a newly added message by me\n> > in the attached debug patch).\n> >\n> > Now, it is not completely clear to me how exactly '013_partition.pl'\n> > leads to this situation but there is a possibility based on the LOGs\n> > it shows.\n> >\n>\n> Thanks, I'll take a look later.\n>\n\nThis is still failing [1][2].\n\n[1] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=florican&dt=2022-03-28%2005%3A16%3A53\n[2] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=flaviventris&dt=2022-03-24%2013%3A13%3A08\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 29 Mar 2022 15:30:22 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/29/22 12:00, Amit Kapila wrote:\n> On Sun, Mar 20, 2022 at 4:53 PM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> On 3/20/22 07:23, Amit Kapila wrote:\n>>> On Sun, Mar 20, 2022 at 8:41 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>>>\n>>>> On Fri, Mar 18, 2022 at 10:42 PM Tomas Vondra\n>>>> <tomas.vondra@enterprisedb.com> wrote:\n>>>>\n>>>>> So the question is why those two sync workers never complete - I guess\n>>>>> there's some sort of lock wait (deadlock?) or infinite loop.\n>>>>>\n>>>>\n>>>> It would be a bit tricky to reproduce this even if the above theory is\n>>>> correct but I'll try it today or tomorrow.\n>>>>\n>>>\n>>> I am able to reproduce it with the help of a debugger. Firstly, I have\n>>> added the LOG message and some While (true) loops to debug sync and\n>>> apply workers. 
Test setup\n>>>\n>>> Node-1:\n>>> create table t1(c1);\n>>> create table t2(c1);\n>>> insert into t1 values(1);\n>>> create publication pub1 for table t1;\n>>> create publication pu2;\n>>>\n>>> Node-2:\n>>> change max_sync_workers_per_subscription to 1 in potgresql.conf\n>>> create table t1(c1);\n>>> create table t2(c1);\n>>> create subscription sub1 connection 'dbname = postgres' publication pub1;\n>>>\n>>> Till this point, just allow debuggers in both workers just continue.\n>>>\n>>> Node-1:\n>>> alter publication pub1 add table t2;\n>>> insert into t1 values(2);\n>>>\n>>> Here, we have to debug the apply worker such that when it tries to\n>>> apply the insert, stop the debugger in function apply_handle_insert()\n>>> after doing begin_replication_step().\n>>>\n>>> Node-2:\n>>> alter subscription sub1 set pub1, pub2;\n>>>\n>>> Now, continue the debugger of apply worker, it should first start the\n>>> sync worker and then exit because of parameter change. All of these\n>>> debugging steps are to just ensure the point that it should first\n>>> start the sync worker and then exit. After this point, table sync\n>>> worker never finishes and log is filled with messages: \"reached\n>>> max_sync_workers_per_subscription limit\" (a newly added message by me\n>>> in the attached debug patch).\n>>>\n>>> Now, it is not completely clear to me how exactly '013_partition.pl'\n>>> leads to this situation but there is a possibility based on the LOGs\n>>> it shows.\n>>>\n>>\n>> Thanks, I'll take a look later.\n>>\n> \n> This is still failing [1][2].\n> \n> [1] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=florican&dt=2022-03-28%2005%3A16%3A53\n> [2] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=flaviventris&dt=2022-03-24%2013%3A13%3A08\n> \n\nAFAICS we've concluded this is a pre-existing issue, not something\nintroduced by a recently committed patch, and I don't think there's any\nproposal how to fix that. 
So I've put that on the back burner until\nafter the current CF.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Tue, 29 Mar 2022 13:03:34 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Mar 29, 2022 at 4:33 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/29/22 12:00, Amit Kapila wrote:\n> >>\n> >> Thanks, I'll take a look later.\n> >>\n> >\n> > This is still failing [1][2].\n> >\n> > [1] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=florican&dt=2022-03-28%2005%3A16%3A53\n> > [2] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=flaviventris&dt=2022-03-24%2013%3A13%3A08\n> >\n>\n> AFAICS we've concluded this is a pre-existing issue, not something\n> introduced by a recently committed patch, and I don't think there's any\n> proposal how to fix that.\n>\n\nI have suggested in email [1] that increasing values\nmax_sync_workers_per_subscription/max_logical_replication_workers\nshould solve this issue. Now, whether this is a previous issue or\nbehavior can be debatable but I think it happens for the new test case\nadded by commit c91f71b9dc.\n\n> So I've put that on the back burner until\n> after the current CF.\n>\n\nOkay, last time you didn't mention that you want to look at it after\nCF. 
I just assumed that you want to take a look after pushing the main\ncolumn list patch, so thought of sending a reminder but I am fine if\nyou want to look at it after CF.\n\n[1] - https://www.postgresql.org/message-id/CAA4eK1LpBFU49Ohbnk%3Ddv_v9YP%2BKqh1%2BSf8i%2B%2B_s-QhD1Gy4Qw%40mail.gmail.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 29 Mar 2022 17:17:18 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/29/22 13:47, Amit Kapila wrote:\n> On Tue, Mar 29, 2022 at 4:33 PM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> On 3/29/22 12:00, Amit Kapila wrote:\n>>>>\n>>>> Thanks, I'll take a look later.\n>>>>\n>>>\n>>> This is still failing [1][2].\n>>>\n>>> [1] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=florican&dt=2022-03-28%2005%3A16%3A53\n>>> [2] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=flaviventris&dt=2022-03-24%2013%3A13%3A08\n>>>\n>>\n>> AFAICS we've concluded this is a pre-existing issue, not something\n>> introduced by a recently committed patch, and I don't think there's any\n>> proposal how to fix that.\n>>\n> \n> I have suggested in email [1] that increasing values\n> max_sync_workers_per_subscription/max_logical_replication_workers\n> should solve this issue. Now, whether this is a previous issue or\n> behavior can be debatable but I think it happens for the new test case\n> added by commit c91f71b9dc.\n> \n\nIMHO that'd be just hiding the actual issue, which is the failure to\nsync the subscription in some circumstances. We should fix that, not\njust make sure the tests don't trigger it.\n\n>> So I've put that on the back burner until\n>> after the current CF.\n>>\n> \n> Okay, last time you didn't mention that you want to look at it after\n> CF. 
I just assumed that you want to take a look after pushing the main\n> column list patch, so thought of sending a reminder but I am fine if\n> you want to look at it after CF.\n> \n\nOK, sorry for not being clearer in my response.\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Tue, 29 Mar 2022 14:39:37 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Mar 29, 2022 at 6:09 PM Tomas Vondra\n<tomas.vondra@enterprisedb.com> wrote:\n>\n> On 3/29/22 13:47, Amit Kapila wrote:\n> > On Tue, Mar 29, 2022 at 4:33 PM Tomas Vondra\n> > <tomas.vondra@enterprisedb.com> wrote:\n> >>\n> >> On 3/29/22 12:00, Amit Kapila wrote:\n> >>>>\n> >>>> Thanks, I'll take a look later.\n> >>>>\n> >>>\n> >>> This is still failing [1][2].\n> >>>\n> >>> [1] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=florican&dt=2022-03-28%2005%3A16%3A53\n> >>> [2] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=flaviventris&dt=2022-03-24%2013%3A13%3A08\n> >>>\n> >>\n> >> AFAICS we've concluded this is a pre-existing issue, not something\n> >> introduced by a recently committed patch, and I don't think there's any\n> >> proposal how to fix that.\n> >>\n> >\n> > I have suggested in email [1] that increasing values\n> > max_sync_workers_per_subscription/max_logical_replication_workers\n> > should solve this issue. Now, whether this is a previous issue or\n> > behavior can be debatable but I think it happens for the new test case\n> > added by commit c91f71b9dc.\n> >\n>\n> IMHO that'd be just hiding the actual issue, which is the failure to\n> sync the subscription in some circumstances. 
We should fix that, not\n> just make sure the tests don't trigger it.\n>\n\nI am in favor of fixing/changing some existing behavior to make it\nbetter and would be ready to help in that investigation as well but\nwas just not sure if it is a good idea to let some of the buildfarm\nmember(s) fail for a number of days. Anyway, I leave this judgment to\nyou.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 30 Mar 2022 08:16:03 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "\n\nOn 3/30/22 04:46, Amit Kapila wrote:\n> On Tue, Mar 29, 2022 at 6:09 PM Tomas Vondra\n> <tomas.vondra@enterprisedb.com> wrote:\n>>\n>> On 3/29/22 13:47, Amit Kapila wrote:\n>>> On Tue, Mar 29, 2022 at 4:33 PM Tomas Vondra\n>>> <tomas.vondra@enterprisedb.com> wrote:\n>>>>\n>>>> On 3/29/22 12:00, Amit Kapila wrote:\n>>>>>>\n>>>>>> Thanks, I'll take a look later.\n>>>>>>\n>>>>>\n>>>>> This is still failing [1][2].\n>>>>>\n>>>>> [1] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=florican&dt=2022-03-28%2005%3A16%3A53\n>>>>> [2] - https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=flaviventris&dt=2022-03-24%2013%3A13%3A08\n>>>>>\n>>>>\n>>>> AFAICS we've concluded this is a pre-existing issue, not something\n>>>> introduced by a recently committed patch, and I don't think there's any\n>>>> proposal how to fix that.\n>>>>\n>>>\n>>> I have suggested in email [1] that increasing values\n>>> max_sync_workers_per_subscription/max_logical_replication_workers\n>>> should solve this issue. Now, whether this is a previous issue or\n>>> behavior can be debatable but I think it happens for the new test case\n>>> added by commit c91f71b9dc.\n>>>\n>>\n>> IMHO that'd be just hiding the actual issue, which is the failure to\n>> sync the subscription in some circumstances. 
We should fix that, not\n>> just make sure the tests don't trigger it.\n>>\n> \n> I am in favor of fixing/changing some existing behavior to make it\n> better and would be ready to help in that investigation as well but\n> was just not sure if it is a good idea to let some of the buildfarm\n> member(s) fail for a number of days. Anyway, I leave this judgment to\n> you.\n> \n\nOK. If it affected more animals, and/or if they were failing more often,\nit'd definitely warrant a more active approach. But AFAICS it affects\nonly a tiny fraction, and even there it fails maybe 1 in 20 runs ...\n\nPlus the symptoms are pretty clear, it's unlikely to cause enigmatic\nfailures, forcing people to spend time on investigating it.\n\nOf course, that's my assessment and it feels weird as it goes directly\nagainst my instincts to keep all tests working :-/\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Wed, 30 Mar 2022 14:01:38 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sun, Mar 20, 2022 at 3:23 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Sun, Mar 20, 2022 at 8:41 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > On Fri, Mar 18, 2022 at 10:42 PM Tomas Vondra\n> > <tomas.vondra@enterprisedb.com> wrote:\n> >\n> > > So the question is why those two sync workers never complete - I guess\n> > > there's some sort of lock wait (deadlock?) or infinite loop.\n> > >\n> >\n> > It would be a bit tricky to reproduce this even if the above theory is\n> > correct but I'll try it today or tomorrow.\n> >\n>\n> I am able to reproduce it with the help of a debugger. Firstly, I have\n> added the LOG message and some While (true) loops to debug sync and\n> apply workers. 
Test setup\n>\n> Node-1:\n> create table t1(c1);\n> create table t2(c1);\n> insert into t1 values(1);\n> create publication pub1 for table t1;\n> create publication pu2;\n>\n> Node-2:\n> change max_sync_workers_per_subscription to 1 in potgresql.conf\n> create table t1(c1);\n> create table t2(c1);\n> create subscription sub1 connection 'dbname = postgres' publication pub1;\n>\n> Till this point, just allow debuggers in both workers just continue.\n>\n> Node-1:\n> alter publication pub1 add table t2;\n> insert into t1 values(2);\n>\n> Here, we have to debug the apply worker such that when it tries to\n> apply the insert, stop the debugger in function apply_handle_insert()\n> after doing begin_replication_step().\n>\n> Node-2:\n> alter subscription sub1 set pub1, pub2;\n>\n> Now, continue the debugger of apply worker, it should first start the\n> sync worker and then exit because of parameter change. All of these\n> debugging steps are to just ensure the point that it should first\n> start the sync worker and then exit. After this point, table sync\n> worker never finishes and log is filled with messages: \"reached\n> max_sync_workers_per_subscription limit\" (a newly added message by me\n> in the attached debug patch).\n>\n> Now, it is not completely clear to me how exactly '013_partition.pl'\n> leads to this situation but there is a possibility based on the LOGs\n\nI've looked at this issue and had the same analysis. Also, I could\nreproduce this issue with the steps shared by Amit.\n\nAs I mentioned in another thread[1], the fact that the tablesync\nworker doesn't check the return value from\nwait_for_worker_state_change() seems a bug to me. So my initial\nthought of the solution is that we can have the tablesync worker check\nthe return value and exit if it's false. That way, the apply worker\ncan restart and request to launch the tablesync worker again. 
What do\nyou think?\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Wed, 13 Apr 2022 17:10:27 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Apr 13, 2022 at 1:41 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I've looked at this issue and had the same analysis. Also, I could\n> reproduce this issue with the steps shared by Amit.\n>\n> As I mentioned in another thread[1], the fact that the tablesync\n> worker doesn't check the return value from\n> wait_for_worker_state_change() seems a bug to me. So my initial\n> thought of the solution is that we can have the tablesync worker check\n> the return value and exit if it's false. That way, the apply worker\n> can restart and request to launch the tablesync worker again. What do\n> you think?\n>\n\nI think that will fix this symptom but I am not sure if that would be\nthe best way to deal with this because we have a mechanism where the\nsync worker can continue even if we don't do anything as a result of\nwait_for_worker_state_change() provided apply worker restarts.\n\nThe other part of the puzzle is the below check in the code:\n/*\n* If we reached the sync worker limit per subscription, just exit\n* silently as we might get here because of an otherwise harmless race\n* condition.\n*/\nif (nsyncworkers >= max_sync_workers_per_subscription)\n\nIt is not clear to me why this check is there, if this wouldn't be\nthere, the user would have got either a WARNING to increase the\nmax_logical_replication_workers or the apply worker would have been\nrestarted. Do you have any idea about this?\n\nYet another option is that we ensure that before launching sync\nworkers (say in process_syncing_tables_for_apply->FetchTableStates,\nwhen we have to start a new transaction) we again call\nmaybe_reread_subscription(), which should also fix this symptom. 
But\nagain, I am not sure why it should be compulsory to call\nmaybe_reread_subscription() in such a situation, there are no comments\nwhich suggest it,\n\nNow, the reason why it appeared recently in commit c91f71b9dc is that\nI think we have increased the number of initial table syncs in that\ntest, and probably increasing\nmax_sync_workers_per_subscription/max_logical_replication_workers\nshould fix that test.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 13 Apr 2022 15:15:41 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "(\n\nOn Wed, Apr 13, 2022 at 6:45 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Wed, Apr 13, 2022 at 1:41 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I've looked at this issue and had the same analysis. Also, I could\n> > reproduce this issue with the steps shared by Amit.\n> >\n> > As I mentioned in another thread[1], the fact that the tablesync\n> > worker doesn't check the return value from\n> > wait_for_worker_state_change() seems a bug to me. So my initial\n> > thought of the solution is that we can have the tablesync worker check\n> > the return value and exit if it's false. That way, the apply worker\n> > can restart and request to launch the tablesync worker again. What do\n> > you think?\n> >\n>\n> I think that will fix this symptom but I am not sure if that would be\n> the best way to deal with this because we have a mechanism where the\n> sync worker can continue even if we don't do anything as a result of\n> wait_for_worker_state_change() provided apply worker restarts.\n\nI think we can think this is a separate issue. That is, if tablesync\nworker can start streaming changes even without waiting for the apply\nworker to set SUBREL_STATE_CATCHUP, do we really need the wait? I'm\nnot sure it's really safe. 
If it's safe, the tablesync worker will no\nlonger need to wait there.\n\n>\n> The other part of the puzzle is the below check in the code:\n> /*\n> * If we reached the sync worker limit per subscription, just exit\n> * silently as we might get here because of an otherwise harmless race\n> * condition.\n> */\n> if (nsyncworkers >= max_sync_workers_per_subscription)\n>\n> It is not clear to me why this check is there, if this wouldn't be\n> there, the user would have got either a WARNING to increase the\n> max_logical_replication_workers or the apply worker would have been\n> restarted. Do you have any idea about this?\n\nYeah, I'm also puzzled with this check. It seems that this function\ndoesn't work well when the apply worker is not running and some\ntablesync workers are running. I initially thought that the apply\nworker calls to this function as many as tables that needs to be\nsynced, but it checks the max_sync_workers_per_subscription limit\nbefore calling to logicalrep_worker_launch(). So I'm not really sure\nwe need this check.\n\n>\n> Yet another option is that we ensure that before launching sync\n> workers (say in process_syncing_tables_for_apply->FetchTableStates,\n> when we have to start a new transaction) we again call\n> maybe_reread_subscription(), which should also fix this symptom. 
But\n> again, I am not sure why it should be compulsory to call\n> maybe_reread_subscription() in such a situation, there are no comments\n> which suggest it,\n\nYes, it will fix this issue.\n\n>\n> Now, the reason why it appeared recently in commit c91f71b9dc is that\n> I think we have increased the number of initial table syncs in that\n> test, and probably increasing\n> max_sync_workers_per_subscription/max_logical_replication_workers\n> should fix that test.\n\nI think so too.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Thu, 14 Apr 2022 12:02:16 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Apr 14, 2022 at 8:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Wed, Apr 13, 2022 at 6:45 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > On Wed, Apr 13, 2022 at 1:41 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > I've looked at this issue and had the same analysis. Also, I could\n> > > reproduce this issue with the steps shared by Amit.\n> > >\n> > > As I mentioned in another thread[1], the fact that the tablesync\n> > > worker doesn't check the return value from\n> > > wait_for_worker_state_change() seems a bug to me. So my initial\n> > > thought of the solution is that we can have the tablesync worker check\n> > > the return value and exit if it's false. That way, the apply worker\n> > > can restart and request to launch the tablesync worker again. What do\n> > > you think?\n> > >\n> >\n> > I think that will fix this symptom but I am not sure if that would be\n> > the best way to deal with this because we have a mechanism where the\n> > sync worker can continue even if we don't do anything as a result of\n> > wait_for_worker_state_change() provided apply worker restarts.\n>\n> I think we can think this is a separate issue. 
That is, if tablesync\n> worker can start streaming changes even without waiting for the apply\n> worker to set SUBREL_STATE_CATCHUP, do we really need the wait? I'm\n> not sure it's really safe. If it's safe, the tablesync worker will no\n> longer need to wait there.\n>\n\nAs per my understanding, it is safe, whatever is streamed by tablesync\nworker will be skipped later by apply worker. The wait here avoids\nstreaming the same data both by the apply worker and table sync worker\nwhich I think is good even if it is not a must.\n\n> >\n> > The other part of the puzzle is the below check in the code:\n> > /*\n> > * If we reached the sync worker limit per subscription, just exit\n> > * silently as we might get here because of an otherwise harmless race\n> > * condition.\n> > */\n> > if (nsyncworkers >= max_sync_workers_per_subscription)\n> >\n> > It is not clear to me why this check is there, if this wouldn't be\n> > there, the user would have got either a WARNING to increase the\n> > max_logical_replication_workers or the apply worker would have been\n> > restarted. Do you have any idea about this?\n>\n> Yeah, I'm also puzzled with this check. It seems that this function\n> doesn't work well when the apply worker is not running and some\n> tablesync workers are running. I initially thought that the apply\n> worker calls to this function as many as tables that needs to be\n> synced, but it checks the max_sync_workers_per_subscription limit\n> before calling to logicalrep_worker_launch(). So I'm not really sure\n> we need this check.\n>\n\nI just hope that the original author Petr J. responds to this point. I\nhave added him to this email. 
This will help us to find the best\nsolution for this problem.\n\nNote: I'll be away for the remaining week, so will join the discussion\nnext week unless we reached the conclusion by that time.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 14 Apr 2022 09:09:31 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Apr 14, 2022 at 9:09 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Thu, Apr 14, 2022 at 8:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > The other part of the puzzle is the below check in the code:\n> > > /*\n> > > * If we reached the sync worker limit per subscription, just exit\n> > > * silently as we might get here because of an otherwise harmless race\n> > > * condition.\n> > > */\n> > > if (nsyncworkers >= max_sync_workers_per_subscription)\n> > >\n> > > It is not clear to me why this check is there, if this wouldn't be\n> > > there, the user would have got either a WARNING to increase the\n> > > max_logical_replication_workers or the apply worker would have been\n> > > restarted. Do you have any idea about this?\n> >\n> > Yeah, I'm also puzzled with this check. It seems that this function\n> > doesn't work well when the apply worker is not running and some\n> > tablesync workers are running. I initially thought that the apply\n> > worker calls to this function as many as tables that needs to be\n> > synced, but it checks the max_sync_workers_per_subscription limit\n> > before calling to logicalrep_worker_launch(). So I'm not really sure\n> > we need this check.\n> >\n>\n> I just hope that the original author Petr J. responds to this point. I\n> have added him to this email. This will help us to find the best\n> solution for this problem.\n>\n\nI did some more investigation for this code. 
It is added by commit [1]\nand the patch that led to this commit is first time posted on -hackers\nin email [2]. Now, neither the commit message nor the patch (comments)\ngives much idea as to why this part of code is added but I think there\nis some hint in the email [2]. In particular, read the paragraph in\nthe email [2] that has the lines: \".... and limiting sync workers per\nsubscription theoretically wasn't either (although I don't think it\ncould happen in practice).\".\n\nIt seems that this check has been added to theoretically limit the\nsync workers even though that can't happen because apply worker\nensures that before trying to launch the sync worker. Does this theory\nmake sense to me? If so, I think we can change the check as: \"if\n(OidIsValid(relid) && nsyncworkers >=\nmax_sync_workers_per_subscription)\" in launcher.c. This will serve the\npurpose of the original code and will solve the issue being discussed\nhere. I think we can even backpatch this. What do you think?\n\n[1]\ncommit de4389712206d2686e09ad8d6dd112dc4b6c6d42\nAuthor: Peter Eisentraut <peter_e@gmx.net>\nDate: Wed Apr 26 10:43:04 2017 -0400\n\n Fix various concurrency issues in logical replication worker launching\n\n[2] - https://www.postgresql.org/message-id/fa387e24-0e26-c02d-ef16-7e46ada200dd%402ndquadrant.com\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 18 Apr 2022 16:34:06 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 4/18/22 13:04, Amit Kapila wrote:\n> On Thu, Apr 14, 2022 at 9:09 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>\n>> On Thu, Apr 14, 2022 at 8:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>>>>\n>>>> The other part of the puzzle is the below check in the code:\n>>>> /*\n>>>> * If we reached the sync worker limit per subscription, just exit\n>>>> * silently as we might get here because of an otherwise harmless 
race\n>>>> * condition.\n>>>> */\n>>>> if (nsyncworkers >= max_sync_workers_per_subscription)\n>>>>\n>>>> It is not clear to me why this check is there, if this wouldn't be\n>>>> there, the user would have got either a WARNING to increase the\n>>>> max_logical_replication_workers or the apply worker would have been\n>>>> restarted. Do you have any idea about this?\n>>>\n>>> Yeah, I'm also puzzled with this check. It seems that this function\n>>> doesn't work well when the apply worker is not running and some\n>>> tablesync workers are running. I initially thought that the apply\n>>> worker calls to this function as many as tables that needs to be\n>>> synced, but it checks the max_sync_workers_per_subscription limit\n>>> before calling to logicalrep_worker_launch(). So I'm not really sure\n>>> we need this check.\n>>>\n>>\n>> I just hope that the original author Petr J. responds to this point. I\n>> have added him to this email. This will help us to find the best\n>> solution for this problem.\n>>\n> \n> I did some more investigation for this code. It is added by commit [1]\n> and the patch that led to this commit is first time posted on -hackers\n> in email [2]. Now, neither the commit message nor the patch (comments)\n> gives much idea as to why this part of code is added but I think there\n> is some hint in the email [2]. In particular, read the paragraph in\n> the email [2] that has the lines: \".... and limiting sync workers per\n> subscription theoretically wasn't either (although I don't think it\n> could happen in practice).\".\n> \n> It seems that this check has been added to theoretically limit the\n> sync workers even though that can't happen because apply worker\n> ensures that before trying to launch the sync worker. Does this theory\n> make sense to me? If so, I think we can change the check as: \"if\n> (OidIsValid(relid) && nsyncworkers >=\n> max_sync_workers_per_subscription)\" in launcher.c. 
This will serve the\n> purpose of the original code and will solve the issue being discussed\n> here. I think we can even backpatch this. What do you think?\n> \n\nSounds reasonable to me. It's unfortunate there's no explanation of what\nexactly is the commit message fixing (and why), but I doubt anyone will\nremember the details after 5 years.\n\n+1 to backpatching, I consider this to be a bug\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Mon, 18 Apr 2022 14:36:00 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Apr 18, 2022 at 8:04 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Thu, Apr 14, 2022 at 9:09 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > On Thu, Apr 14, 2022 at 8:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > >\n> > > > The other part of the puzzle is the below check in the code:\n> > > > /*\n> > > > * If we reached the sync worker limit per subscription, just exit\n> > > > * silently as we might get here because of an otherwise harmless race\n> > > > * condition.\n> > > > */\n> > > > if (nsyncworkers >= max_sync_workers_per_subscription)\n> > > >\n> > > > It is not clear to me why this check is there, if this wouldn't be\n> > > > there, the user would have got either a WARNING to increase the\n> > > > max_logical_replication_workers or the apply worker would have been\n> > > > restarted. Do you have any idea about this?\n> > >\n> > > Yeah, I'm also puzzled with this check. It seems that this function\n> > > doesn't work well when the apply worker is not running and some\n> > > tablesync workers are running. 
I initially thought that the apply\n> > > worker calls to this function as many as tables that needs to be\n> > > synced, but it checks the max_sync_workers_per_subscription limit\n> > > before calling to logicalrep_worker_launch(). So I'm not really sure\n> > > we need this check.\n> > >\n> >\n> > I just hope that the original author Petr J. responds to this point. I\n> > have added him to this email. This will help us to find the best\n> > solution for this problem.\n> >\n>\n> I did some more investigation for this code. It is added by commit [1]\n> and the patch that led to this commit is first time posted on -hackers\n> in email [2]. Now, neither the commit message nor the patch (comments)\n> gives much idea as to why this part of code is added but I think there\n> is some hint in the email [2]. In particular, read the paragraph in\n> the email [2] that has the lines: \".... and limiting sync workers per\n> subscription theoretically wasn't either (although I don't think it\n> could happen in practice).\".\n>\n> It seems that this check has been added to theoretically limit the\n> sync workers even though that can't happen because apply worker\n> ensures that before trying to launch the sync worker. Does this theory\n> make sense to me? If so, I think we can change the check as: \"if\n> (OidIsValid(relid) && nsyncworkers >=\n> max_sync_workers_per_subscription)\" in launcher.c. This will serve the\n> purpose of the original code and will solve the issue being discussed\n> here. I think we can even backpatch this. What do you think?\n\n+1. 
I also think it's a bug so back-patching makes sense to me.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Tue, 19 Apr 2022 10:27:40 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Apr 19, 2022 at 6:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Apr 18, 2022 at 8:04 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > On Thu, Apr 14, 2022 at 9:09 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > >\n> > > On Thu, Apr 14, 2022 at 8:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > > >\n> > > > > The other part of the puzzle is the below check in the code:\n> > > > > /*\n> > > > > * If we reached the sync worker limit per subscription, just exit\n> > > > > * silently as we might get here because of an otherwise harmless race\n> > > > > * condition.\n> > > > > */\n> > > > > if (nsyncworkers >= max_sync_workers_per_subscription)\n> > > > >\n> > > > > It is not clear to me why this check is there, if this wouldn't be\n> > > > > there, the user would have got either a WARNING to increase the\n> > > > > max_logical_replication_workers or the apply worker would have been\n> > > > > restarted. Do you have any idea about this?\n> > > >\n> > > > Yeah, I'm also puzzled with this check. It seems that this function\n> > > > doesn't work well when the apply worker is not running and some\n> > > > tablesync workers are running. I initially thought that the apply\n> > > > worker calls to this function as many as tables that needs to be\n> > > > synced, but it checks the max_sync_workers_per_subscription limit\n> > > > before calling to logicalrep_worker_launch(). So I'm not really sure\n> > > > we need this check.\n> > > >\n> > >\n> > > I just hope that the original author Petr J. responds to this point. I\n> > > have added him to this email. 
This will help us to find the best\n> > > solution for this problem.\n> > >\n> >\n> > I did some more investigation for this code. It is added by commit [1]\n> > and the patch that led to this commit is first time posted on -hackers\n> > in email [2]. Now, neither the commit message nor the patch (comments)\n> > gives much idea as to why this part of code is added but I think there\n> > is some hint in the email [2]. In particular, read the paragraph in\n> > the email [2] that has the lines: \".... and limiting sync workers per\n> > subscription theoretically wasn't either (although I don't think it\n> > could happen in practice).\".\n> >\n> > It seems that this check has been added to theoretically limit the\n> > sync workers even though that can't happen because apply worker\n> > ensures that before trying to launch the sync worker. Does this theory\n> > make sense to me? If so, I think we can change the check as: \"if\n> > (OidIsValid(relid) && nsyncworkers >=\n> > max_sync_workers_per_subscription)\" in launcher.c. This will serve the\n> > purpose of the original code and will solve the issue being discussed\n> > here. I think we can even backpatch this. What do you think?\n>\n> +1. I also think it's a bug so back-patching makes sense to me.\n>\n\nPushed. Thanks Tomas and Sawada-San.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 19 Apr 2022 10:23:22 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Hi,\r\n\r\nOn 4/19/22 12:53 AM, Amit Kapila wrote:\r\n> On Tue, Apr 19, 2022 at 6:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\r\n>>\r\n>> +1. I also think it's a bug so back-patching makes sense to me.\r\n>>\r\n> \r\n> Pushed. 
Thanks Tomas and Sawada-San.\r\n\r\nThis is still on the PG15 open items list[1] though marked as with a fix.\r\n\r\nDid dd4ab6fd resolve the issue, or does this need more work?\r\n\r\nThanks,\r\n\r\nJonathan\r\n\r\n[1] https://wiki.postgresql.org/wiki/PostgreSQL_15_Open_Items", "msg_date": "Tue, 10 May 2022 09:55:18 -0400", "msg_from": "\"Jonathan S. Katz\" <jkatz@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 5/10/22 15:55, Jonathan S. Katz wrote:\n> Hi,\n> \n> On 4/19/22 12:53 AM, Amit Kapila wrote:\n>> On Tue, Apr 19, 2022 at 6:58 AM Masahiko Sawada\n>> <sawada.mshk@gmail.com> wrote:\n>>>\n>>> +1. I also think it's a bug so back-patching makes sense to me.\n>>>\n>>\n>> Pushed. Thanks Tomas and Sawada-San.\n> \n> This is still on the PG15 open items list[1] though marked as with a fix.\n> \n> Did dd4ab6fd resolve the issue, or does this need more work?\n> \n\nI believe that's fixed, the buildfarm does not seem to show any relevant\nfailures in subscriptionCheck since dd4ab6fd got committed.\n\n\nregards\n\n-- \nTomas Vondra\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n", "msg_date": "Tue, 10 May 2022 21:17:45 +0200", "msg_from": "Tomas Vondra <tomas.vondra@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On 5/10/22 3:17 PM, Tomas Vondra wrote:\r\n> On 5/10/22 15:55, Jonathan S. Katz wrote:\r\n>> Hi,\r\n>>\r\n>> On 4/19/22 12:53 AM, Amit Kapila wrote:\r\n>>> On Tue, Apr 19, 2022 at 6:58 AM Masahiko Sawada\r\n>>> <sawada.mshk@gmail.com> wrote:\r\n>>>>\r\n>>>> +1. I also think it's a bug so back-patching makes sense to me.\r\n>>>>\r\n>>>\r\n>>> Pushed. 
Thanks Tomas and Sawada-San.\r\n>>\r\n>> This is still on the PG15 open items list[1] though marked as with a fix.\r\n>>\r\n>> Did dd4ab6fd resolve the issue, or does this need more work?\r\n>>\r\n> \r\n> I believe that's fixed, the buildfarm does not seem to show any relevant\r\n> failures in subscriptionCheck since dd4ab6fd got committed.\r\n\r\nGreat. I'm moving it off of open items.\r\n\r\nThanks for confirming!\r\n\r\nJonathan", "msg_date": "Tue, 10 May 2022 15:28:51 -0400", "msg_from": "\"Jonathan S. Katz\" <jkatz@postgresql.org>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, May 10, 2022 at 7:25 PM Jonathan S. Katz <jkatz@postgresql.org> wrote:\n>\n> On 4/19/22 12:53 AM, Amit Kapila wrote:\n> > On Tue, Apr 19, 2022 at 6:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >>\n> >> +1. I also think it's a bug so back-patching makes sense to me.\n> >>\n> >\n> > Pushed. Thanks Tomas and Sawada-San.\n>\n> This is still on the PG15 open items list[1] though marked as with a fix.\n>\n> Did dd4ab6fd resolve the issue, or does this need more work?\n>\n\nThe commit dd4ab6fd resolved this issue. I didn't notice it after that commit.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 11 May 2022 08:50:21 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Sat, Dec 11, 2021 at 12:24 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n>\n> On 2021-Dec-10, Peter Eisentraut wrote:\n>\n...\n>\n> > There was no documentation, so I wrote a bit (patch 0001). It only touches\n> > the CREATE PUBLICATION and ALTER PUBLICATION pages at the moment. There was\n> > no mention in the Logical Replication chapter that warranted updating.\n> > Perhaps we should revisit that chapter at the end of the release cycle.\n>\n> Thanks. 
I hadn't looked at the docs yet, so I'll definitely take this.\n>\n\nWas this documentation ever written?\n\nMy assumption was that for PG15 there might be a whole new section\nadded to Chapter 31 [1] for describing \"Column Lists\" (i.e. the Column\nList equivalent of the \"Row Filters\" section)\n\n------\n[1] https://www.postgresql.org/docs/15/logical-replication.html\n\nKind Regards,\nPeter Smith.\nFujitsu Australia\n\n\n", "msg_date": "Mon, 25 Jul 2022 17:57:09 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Jul 25, 2022 at 1:27 PM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n> On Sat, Dec 11, 2021 at 12:24 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> >\n> > On 2021-Dec-10, Peter Eisentraut wrote:\n> >\n> ...\n> >\n> > > There was no documentation, so I wrote a bit (patch 0001). It only touches\n> > > the CREATE PUBLICATION and ALTER PUBLICATION pages at the moment. There was\n> > > no mention in the Logical Replication chapter that warranted updating.\n> > > Perhaps we should revisit that chapter at the end of the release cycle.\n> >\n> > Thanks. I hadn't looked at the docs yet, so I'll definitely take this.\n> >\n>\n> Was this documentation ever written?\n>\n> My assumption was that for PG15 there might be a whole new section\n> added to Chapter 31 [1] for describing \"Column Lists\" (i.e. the Column\n> List equivalent of the \"Row Filters\" section)\n>\n\n+1. I think it makes sense to give more description about this feature\nsimilar to Row Filters. Note that apart from the main feature commit\n[1], we have prohibited certain cases in commit [2]. 
So, one might\nwant to cover that as well.\n\n[1] - https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=923def9a533a7d986acfb524139d8b9e5466d0a5\n[2] - https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=fd0b9dcebda7b931a41ce5c8e86d13f2efd0af2e\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 2 Aug 2022 14:27:38 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Aug 2, 2022 at 6:57 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Mon, Jul 25, 2022 at 1:27 PM Peter Smith <smithpb2250@gmail.com> wrote:\n> >\n> > On Sat, Dec 11, 2021 at 12:24 AM Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:\n> > >\n> > > On 2021-Dec-10, Peter Eisentraut wrote:\n> > >\n> > ...\n> > >\n> > > > There was no documentation, so I wrote a bit (patch 0001). It only touches\n> > > > the CREATE PUBLICATION and ALTER PUBLICATION pages at the moment. There was\n> > > > no mention in the Logical Replication chapter that warranted updating.\n> > > > Perhaps we should revisit that chapter at the end of the release cycle.\n> > >\n> > > Thanks. I hadn't looked at the docs yet, so I'll definitely take this.\n> > >\n> >\n> > Was this documentation ever written?\n> >\n> > My assumption was that for PG15 there might be a whole new section\n> > added to Chapter 31 [1] for describing \"Column Lists\" (i.e. the Column\n> > List equivalent of the \"Row Filters\" section)\n> >\n>\n> +1. I think it makes sense to give more description about this feature\n> similar to Row Filters. Note that apart from the main feature commit\n> [1], we have prohibited certain cases in commit [2]. 
So, one might\n> want to cover that as well.\n>\n> [1] - https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=923def9a533a7d986acfb524139d8b9e5466d0a5\n> [2] - https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=fd0b9dcebda7b931a41ce5c8e86d13f2efd0af2e\n>\n\nOK. Unless somebody else has already started this work then I can do\nthis. I will post a draft patch in a few days.\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia\n\n\n", "msg_date": "Wed, 3 Aug 2022 08:42:01 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "PSA patch version v1* for a new \"Column Lists\" pgdocs section\n\nThis is just a first draft, but I wanted to post it as-is, with the\nhope that I can get some feedback while continuing to work on it.\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia", "msg_date": "Mon, 8 Aug 2022 18:37:51 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Aug 8, 2022 at 2:08 PM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n> PSA patch version v1* for a new \"Column Lists\" pgdocs section\n>\n> This is just a first draft, but I wanted to post it as-is, with the\n> hope that I can get some feedback while continuing to work on it.\n\nFew comments:\n1) Row filters mentions that \"It has no effect on TRUNCATE commands.\",\nthe same is not present in case of column filters. 
We should keep the\nchanges similarly for consistency.\n--- a/doc/src/sgml/ref/create_publication.sgml\n+++ b/doc/src/sgml/ref/create_publication.sgml\n@@ -90,8 +90,7 @@ CREATE PUBLICATION <replaceable\nclass=\"parameter\">name</replaceable>\n <para>\n When a column list is specified, only the named columns are replicated.\n If no column list is specified, all columns of the table are replicated\n- through this publication, including any columns added later. If a column\n- list is specified, it must include the replica identity columns.\n+ through this publication, including any columns added later.\n\n2) The document says that \"if the table uses REPLICA IDENTITY FULL,\nspecifying a column list is not allowed.\":\n+ publishes only <command>INSERT</command> operations. Furthermore, if the\n+ table uses <literal>REPLICA IDENTITY FULL</literal>, specifying a column\n+ list is not allowed.\n+ </para>\n\nDid you mean specifying a column list during create publication for\nREPLICA IDENTITY FULL table like below scenario:\npostgres=# create table t2(c1 int, c2 int, c3 int);\nCREATE TABLE\npostgres=# alter table t2 replica identity full ;\nALTER TABLE\npostgres=# create publication pub1 for table t2(c1,c2);\nCREATE PUBLICATION\n\nIf so, the document says specifying column list is not allowed, but\ncreating a publication with column list on replica identity full was\nsuccessful.\n\nRegards,\nVignesh\n\n\n", "msg_date": "Tue, 16 Aug 2022 22:34:26 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Thanks for the view of v1-0001.\n\nOn Wed, Aug 17, 2022 at 3:04 AM vignesh C <vignesh21@gmail.com> wrote:\n...\n> 1) Row filters mentions that \"It has no effect on TRUNCATE commands.\",\n> the same is not present in case of column filters. 
We should keep the\n> changes similarly for consistency.\n> --- a/doc/src/sgml/ref/create_publication.sgml\n> +++ b/doc/src/sgml/ref/create_publication.sgml\n> @@ -90,8 +90,7 @@ CREATE PUBLICATION <replaceable\n> class=\"parameter\">name</replaceable>\n> <para>\n> When a column list is specified, only the named columns are replicated.\n> If no column list is specified, all columns of the table are replicated\n> - through this publication, including any columns added later. If a column\n> - list is specified, it must include the replica identity columns.\n> + through this publication, including any columns added later.\n\nModified as suggested.\n\n>\n> 2) The document says that \"if the table uses REPLICA IDENTITY FULL,\n> specifying a column list is not allowed.\":\n> + publishes only <command>INSERT</command> operations. Furthermore, if the\n> + table uses <literal>REPLICA IDENTITY FULL</literal>, specifying a column\n> + list is not allowed.\n> + </para>\n>\n> Did you mean specifying a column list during create publication for\n> REPLICA IDENTITY FULL table like below scenario:\n> postgres=# create table t2(c1 int, c2 int, c3 int);\n> CREATE TABLE\n> postgres=# alter table t2 replica identity full ;\n> ALTER TABLE\n> postgres=# create publication pub1 for table t2(c1,c2);\n> CREATE PUBLICATION\n>\n> If so, the document says specifying column list is not allowed, but\n> creating a publication with column list on replica identity full was\n> successful.\n\nThat patch v1-0001 was using the same wording from the github commit\nmessage [1]. 
I agree it was a bit vague.\n\nIn fact the replica identity validation is done at DML execution time\nso your example will fail as expected when you attempt to do a UPDATE\noperation.\n\ne.g.\ntest_pub=# update t2 set c2=23 where c1=1;\nERROR: cannot update table \"t2\"\nDETAIL: Column list used by the publication does not cover the\nreplica identity.\n\nI modified the wording for this part of the docs.\n\n~~~\n\nPSA new set of v2* patches.\n\n------\n[1] - https://github.com/postgres/postgres/commit/923def9a533a7d986acfb524139d8b9e5466d0a5\n\nKind Regards,\nPeter Smith\nFujitsu Australia", "msg_date": "Mon, 22 Aug 2022 18:27:43 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "Op 22-08-2022 om 10:27 schreef Peter Smith:\n> \n> PSA new set of v2* patches.\n\nHi,\n\nIn the second file a small typo, I think:\n\n\"enclosed by parenthesis\" should be\n\"enclosed by parentheses\"\n\nthanks,\nErik\n\n\n\n", "msg_date": "Mon, 22 Aug 2022 11:11:46 +0200", "msg_from": "Erik Rijkers <er@xs4all.nl>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Aug 22, 2022 at 1:58 PM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n> Thanks for the view of v1-0001.\n>\n> On Wed, Aug 17, 2022 at 3:04 AM vignesh C <vignesh21@gmail.com> wrote:\n> ...\n> > 1) Row filters mentions that \"It has no effect on TRUNCATE commands.\",\n> > the same is not present in case of column filters. 
We should keep the\n> > changes similarly for consistency.\n> > --- a/doc/src/sgml/ref/create_publication.sgml\n> > +++ b/doc/src/sgml/ref/create_publication.sgml\n> > @@ -90,8 +90,7 @@ CREATE PUBLICATION <replaceable\n> > class=\"parameter\">name</replaceable>\n> > <para>\n> > When a column list is specified, only the named columns are replicated.\n> > If no column list is specified, all columns of the table are replicated\n> > - through this publication, including any columns added later. If a column\n> > - list is specified, it must include the replica identity columns.\n> > + through this publication, including any columns added later.\n>\n> Modified as suggested.\n>\n> >\n> > 2) The document says that \"if the table uses REPLICA IDENTITY FULL,\n> > specifying a column list is not allowed.\":\n> > + publishes only <command>INSERT</command> operations. Furthermore, if the\n> > + table uses <literal>REPLICA IDENTITY FULL</literal>, specifying a column\n> > + list is not allowed.\n> > + </para>\n> >\n> > Did you mean specifying a column list during create publication for\n> > REPLICA IDENTITY FULL table like below scenario:\n> > postgres=# create table t2(c1 int, c2 int, c3 int);\n> > CREATE TABLE\n> > postgres=# alter table t2 replica identity full ;\n> > ALTER TABLE\n> > postgres=# create publication pub1 for table t2(c1,c2);\n> > CREATE PUBLICATION\n> >\n> > If so, the document says specifying column list is not allowed, but\n> > creating a publication with column list on replica identity full was\n> > successful.\n>\n> That patch v1-0001 was using the same wording from the github commit\n> message [1]. 
I agree it was a bit vague.\n>\n> In fact the replica identity validation is done at DML execution time\n> so your example will fail as expected when you attempt to do a UPDATE\n> operation.\n>\n> e.g.\n> test_pub=# update t2 set c2=23 where c1=1;\n> ERROR: cannot update table \"t2\"\n> DETAIL: Column list used by the publication does not cover the\n> replica identity.\n>\n> I modified the wording for this part of the docs.\n\nFew comments:\n1) I felt no expressions are allowed in case of column filters. Only\ncolumn names can be specified. The second part of the sentence\nconfuses what is allowed and what is not allowed. Won't it be better\nto remove the second sentence and mention that only column names can\nbe specified.\n+ <para>\n+ Column list can contain only simple column references. Complex\n+ expressions, function calls etc. are not allowed.\n+ </para>\n\n2) tablename should be table name.\n+ <para>\n+ A column list is specified per table following the tablename, and\nenclosed by\n+ parenthesis. See <xref linkend=\"sql-createpublication\"/> for details.\n+ </para>\n\nWe have used table name in the same page in other instances like:\na) The row filter is defined per table. Use a WHERE clause after the\ntable name for each published table that requires data to be filtered\nout. 
The WHERE clause must be enclosed by parentheses.\nb) The tables are matched between the publisher and the subscriber\nusing the fully qualified table name.\n\n3) One small whitespace issue:\ngit am v2-0001-Column-List-replica-identity-rules.patch\nApplying: Column List replica identity rules.\n.git/rebase-apply/patch:30: trailing whitespace.\n if the publication publishes only <command>INSERT</command> operations.\nwarning: 1 line adds whitespace errors.\n\nRegards,\nVignesh\n\n\n", "msg_date": "Mon, 22 Aug 2022 16:55:19 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Aug 22, 2022 at 9:25 PM vignesh C <vignesh21@gmail.com> wrote:\n>\n...\n\n> Few comments:\n> 1) I felt no expressions are allowed in case of column filters. Only\n> column names can be specified. The second part of the sentence\n> confuses what is allowed and what is not allowed. Won't it be better\n> to remove the second sentence and mention that only column names can\n> be specified.\n> + <para>\n> + Column list can contain only simple column references. Complex\n> + expressions, function calls etc. are not allowed.\n> + </para>\n>\n\nThis wording was lifted verbatim from the commit message [1]. But I\nsee your point that it just seems to be overcomplicating a simple\nrule. Modified as suggested.\n\n> 2) tablename should be table name.\n> + <para>\n> + A column list is specified per table following the tablename, and\n> enclosed by\n> + parenthesis. See <xref linkend=\"sql-createpublication\"/> for details.\n> + </para>\n>\n> We have used table name in the same page in other instances like:\n> a) The row filter is defined per table. Use a WHERE clause after the\n> table name for each published table that requires data to be filtered\n> out. 
The WHERE clause must be enclosed by parentheses.\n> b) The tables are matched between the publisher and the subscriber\n> using the fully qualified table name.\n>\n\nFixed as suggested.\n\n> 3) One small whitespace issue:\n> git am v2-0001-Column-List-replica-identity-rules.patch\n> Applying: Column List replica identity rules.\n> .git/rebase-apply/patch:30: trailing whitespace.\n> if the publication publishes only <command>INSERT</command> operations.\n> warning: 1 line adds whitespace errors.\n>\n\nFixed.\n\n~~~\n\nPSA the v3* patch set.\n\n------\n[1] https://github.com/postgres/postgres/commit/923def9a533a7d986acfb524139d8b9e5466d0a5\n\nKind Regards,\nPeter Smith.\nFujitsu Australia", "msg_date": "Tue, 23 Aug 2022 12:22:00 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Aug 22, 2022 at 7:11 PM Erik Rijkers <er@xs4all.nl> wrote:\n>\n> Op 22-08-2022 om 10:27 schreef Peter Smith:\n> >\n> > PSA new set of v2* patches.\n>\n> Hi,\n>\n> In the second file a small typo, I think:\n>\n> \"enclosed by parenthesis\" should be\n> \"enclosed by parentheses\"\n>\n\nThanks for your feedback.\n\nFixed in the v3* patches [1].\n\n------\n[1] https://www.postgresql.org/message-id/CAHut%2BPtHgQbFs9DDeOoqqQLZmMBD8FQPK2WOXJpR1nyDQy8AGA%40mail.gmail.com\n\nKind Regards,\nPeter Smith.\nFujitsu Australia\n\n\n", "msg_date": "Tue, 23 Aug 2022 12:25:38 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Aug 23, 2022 at 7:52 AM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n> On Mon, Aug 22, 2022 at 9:25 PM vignesh C <vignesh21@gmail.com> wrote:\n> >\n> ...\n>\n> > Few comments:\n> > 1) I felt no expressions are allowed in case of column filters. Only\n> > column names can be specified. 
The second part of the sentence\n> > confuses what is allowed and what is not allowed. Won't it be better\n> > to remove the second sentence and mention that only column names can\n> > be specified.\n> > + <para>\n> > + Column list can contain only simple column references. Complex\n> > + expressions, function calls etc. are not allowed.\n> > + </para>\n> >\n>\n> This wording was lifted verbatim from the commit message [1]. But I\n> see your point that it just seems to be overcomplicating a simple\n> rule. Modified as suggested.\n>\n> > 2) tablename should be table name.\n> > + <para>\n> > + A column list is specified per table following the tablename, and\n> > enclosed by\n> > + parenthesis. See <xref linkend=\"sql-createpublication\"/> for details.\n> > + </para>\n> >\n> > We have used table name in the same page in other instances like:\n> > a) The row filter is defined per table. Use a WHERE clause after the\n> > table name for each published table that requires data to be filtered\n> > out. The WHERE clause must be enclosed by parentheses.\n> > b) The tables are matched between the publisher and the subscriber\n> > using the fully qualified table name.\n> >\n>\n> Fixed as suggested.\n>\n> > 3) One small whitespace issue:\n> > git am v2-0001-Column-List-replica-identity-rules.patch\n> > Applying: Column List replica identity rules.\n> > .git/rebase-apply/patch:30: trailing whitespace.\n> > if the publication publishes only <command>INSERT</command> operations.\n> > warning: 1 line adds whitespace errors.\n> >\n>\n> Fixed.\n>\n> ~~~\n>\n> PSA the v3* patch set.\n\nThanks for the updated patch.\nFew comments:\n1) We can shuffle the columns in publisher table and subscriber to\nshow that the order of the column does not matter\n+ <para>\n+ Create a publication <literal>p1</literal>. 
A column list is defined for\n+ table <literal>t1</literal> to reduce the number of columns that will be\n+ replicated.\n+<programlisting>\n+test_pub=# CREATE PUBLICATION p1 FOR TABLE t1 (id, a, b, c);\n+CREATE PUBLICATION\n+test_pub=#\n+</programlisting></para>\n\n2) We can try to keep the line content to less than 80 chars\n+ <para>\n+ A column list is specified per table following the tablename, and\nenclosed by\n+ parenthesis. See <xref linkend=\"sql-createpublication\"/> for details.\n+ </para>\n\n3) tablename should be table name like it is used in other places\n+ <para>\n+ A column list is specified per table following the tablename, and\nenclosed by\n+ parenthesis. See <xref linkend=\"sql-createpublication\"/> for details.\n+ </para>\n\n4a) In the below, you could include mentioning \"Only the column list\ndata of publication <literal>p1</literal> are replicated.\"\n+ <para>\n+ Insert some rows to table <literal>t1</literal>.\n+<programlisting>\n+test_pub=# INSERT INTO t1 VALUES(1, 'a-1', 'b-1', 'c-1', 'd-1', 'e-1');\n+INSERT 0 1\n\n4b) In the above, we could mention that the insert should be done on\nthe \"publisher side\" as the previous statements are executed on the\nsubscriber side.\n\nRegards,\nVignesh\n\n\n", "msg_date": "Thu, 25 Aug 2022 15:08:39 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Aug 25, 2022 at 7:38 PM vignesh C <vignesh21@gmail.com> wrote:\n>\n...\n> > PSA the v3* patch set.\n>\n> Thanks for the updated patch.\n> Few comments:\n> 1) We can shuffle the columns in publisher table and subscriber to\n> show that the order of the column does not matter\n> + <para>\n> + Create a publication <literal>p1</literal>. 
A column list is defined for\n> + table <literal>t1</literal> to reduce the number of columns that will be\n> + replicated.\n> +<programlisting>\n> +test_pub=# CREATE PUBLICATION p1 FOR TABLE t1 (id, a, b, c);\n> +CREATE PUBLICATION\n> +test_pub=#\n> +</programlisting></para>\n>\n\nOK. I made the following changes to the example.\n- now the subscriber table defines cols in a different order than that\nof the publisher table\n- now the publisher column list defines col names in a different order\nthan that of the table\n- now the column list avoids using only adjacent column names\n\n> 2) We can try to keep the line content to less than 80 chars\n> + <para>\n> + A column list is specified per table following the tablename, and\n> enclosed by\n> + parenthesis. See <xref linkend=\"sql-createpublication\"/> for details.\n> + </para>\n>\n\nOK. Modified to use < 80 chars\n\n> 3) tablename should be table name like it is used in other places\n> + <para>\n> + A column list is specified per table following the tablename, and\n> enclosed by\n> + parenthesis. See <xref linkend=\"sql-createpublication\"/> for details.\n> + </para>\n>\n\nSorry, I don't see this problem. AFAIK this same issue was already\nfixed in the v3* patches. Notice in the cited fragment that\n'parenthesis' is misspelt but that was also fixed in v3. Maybe you are\nlooking at an old patch file (??)\n\n> 4a) In the below, you could include mentioning \"Only the column list\n> data of publication <literal>p1</literal> are replicated.\"\n> + <para>\n> + Insert some rows to table <literal>t1</literal>.\n> +<programlisting>\n> +test_pub=# INSERT INTO t1 VALUES(1, 'a-1', 'b-1', 'c-1', 'd-1', 'e-1');\n> +INSERT 0 1\n>\n\nOK. Modified to say this.\n\n> 4b) In the above, we could mention that the insert should be done on\n> the \"publisher side\" as the previous statements are executed on the\n> subscriber side.\n\nOK. 
Modified to say this.\n\n~~~\n\nThanks for the feedback.\n\nPSA patch set v4* where all of the above comments are now addressed.\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia", "msg_date": "Fri, 26 Aug 2022 12:02:57 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Aug 26, 2022 at 7:33 AM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n\nFew comments on both the patches:\nv4-0001*\n=========\n1.\nFurthermore, if the table uses\n+ <literal>REPLICA IDENTITY FULL</literal>, specifying a column list is not\n+ allowed (it will cause publication errors for <command>UPDATE</command> or\n+ <command>DELETE</command> operations).\n\nThis line sounds a bit unclear to me. From this like it appears that\nthe following operation is not allowed:\n\npostgres=# create table t1(c1 int, c2 int, c3 int);\nCREATE TABLE\npostgres=# Alter Table t1 replica identity full;\nALTER TABLE\npostgres=# create publication pub1 for table t1(c1);\nCREATE PUBLICATION\n\nHowever, what is not allowed is the following:\npostgres=# delete from t1;\nERROR: cannot delete from table \"t1\"\nDETAIL: Column list used by the publication does not cover the\nreplica identity.\n\nI am not sure if we really need this line but if so then please try to\nmake it more clear. I think the similar text is present in 0002 patch\nwhich should be modified accordingly.\n\nV4-0002*\n=========\n2.\nHowever, if a\n+ <firstterm>column list</firstterm> is specified then only the columns named\n+ in the list will be replicated. This means the subscriber-side table only\n+ needs to have those columns named by the column list. 
A user might choose to\n+ use column lists for behavioral, security or performance reasons.\n+ </para>\n+\n+ <sect2 id=\"logical-replication-col-list-rules\">\n+ <title>Column List Rules</title>\n+\n+ <para>\n+ A column list is specified per table following the table name, and enclosed\n+ by parentheses. See <xref linkend=\"sql-createpublication\"/> for details.\n+ </para>\n+\n+ <para>\n+ When a column list is specified, only the named columns are replicated.\n+ The list order is not important.\n\nIt seems like \"When a column list is specified, only the named columns\nare replicated.\" is almost a duplicate of the line in the first para.\nSo, I think we can remove it. And if we do so then the second line\ncould be changed to something like: \"While specifying column list, the\norder of columns is not important.\"\n\n3. It seems information about initial table synchronization is\nmissing. We copy only columns specified in the column list. Also, it\nwould be good to add a Note similar to Row Filter to indicate that\nthis list won't be used by pre-15 publishers.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 1 Sep 2022 15:23:30 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Thu, Sep 1, 2022 at 7:53 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Fri, Aug 26, 2022 at 7:33 AM Peter Smith <smithpb2250@gmail.com> wrote:\n> >\n>\n> Few comments on both the patches:\n> v4-0001*\n> =========\n> 1.\n> Furthermore, if the table uses\n> + <literal>REPLICA IDENTITY FULL</literal>, specifying a column list is not\n> + allowed (it will cause publication errors for <command>UPDATE</command> or\n> + <command>DELETE</command> operations).\n>\n> This line sounds a bit unclear to me. 
From this like it appears that\n> the following operation is not allowed:\n>\n> postgres=# create table t1(c1 int, c2 int, c3 int);\n> CREATE TABLE\n> postgres=# Alter Table t1 replica identity full;\n> ALTER TABLE\n> postgres=# create publication pub1 for table t1(c1);\n> CREATE PUBLICATION\n>\n> However, what is not allowed is the following:\n> postgres=# delete from t1;\n> ERROR: cannot delete from table \"t1\"\n> DETAIL: Column list used by the publication does not cover the\n> replica identity.\n>\n> I am not sure if we really need this line but if so then please try to\n> make it more clear. I think the similar text is present in 0002 patch\n> which should be modified accordingly.\n>\n\nThe \"Furthermore…\" sentence came from the commit message [1]. But I\nagree it seems redundant/ambiguous, so I have removed it (and removed\nthe same in patch 0002).\n\n\n> V4-0002*\n> =========\n> 2.\n> However, if a\n> + <firstterm>column list</firstterm> is specified then only the columns named\n> + in the list will be replicated. This means the subscriber-side table only\n> + needs to have those columns named by the column list. A user might choose to\n> + use column lists for behavioral, security or performance reasons.\n> + </para>\n> +\n> + <sect2 id=\"logical-replication-col-list-rules\">\n> + <title>Column List Rules</title>\n> +\n> + <para>\n> + A column list is specified per table following the table name, and enclosed\n> + by parentheses. See <xref linkend=\"sql-createpublication\"/> for details.\n> + </para>\n> +\n> + <para>\n> + When a column list is specified, only the named columns are replicated.\n> + The list order is not important.\n>\n> It seems like \"When a column list is specified, only the named columns\n> are replicated.\" is almost a duplicate of the line in the first para.\n> So, I think we can remove it. 
And if we do so then the second line\n> could be changed to something like: \"While specifying column list, the\n> order of columns is not important.\"\n>\n\nModified as suggested.\n\n> 3. It seems information about initial table synchronization is\n> missing. We copy only columns specified in the column list. Also, it\n> would be good to add a Note similar to Row Filter to indicate that\n> this list won't be used by pre-15 publishers.\n>\n\nDone as suggested. Added a new \"Initial Data Synchronization\" section\nwith content similar to that of the Row Filters section.\n\n~~~\n\nThanks for your review comments.\n\nPSA v5* patches where all the above have been addressed.\n\n------\n[1] https://github.com/postgres/postgres/commit/923def9a533a7d986acfb524139d8b9e5466d0a5\n\nKind Regards,\nPeter Smith.\nFujitsu Australia", "msg_date": "Fri, 2 Sep 2022 13:15:33 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Sep 2, 2022 at 8:45 AM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n> On Thu, Sep 1, 2022 at 7:53 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > On Fri, Aug 26, 2022 at 7:33 AM Peter Smith <smithpb2250@gmail.com> wrote:\n> > >\n> >\n> > Few comments on both the patches:\n> > v4-0001*\n> > =========\n> > 1.\n> > Furthermore, if the table uses\n> > + <literal>REPLICA IDENTITY FULL</literal>, specifying a column list is not\n> > + allowed (it will cause publication errors for <command>UPDATE</command> or\n> > + <command>DELETE</command> operations).\n> >\n> > This line sounds a bit unclear to me. 
From this like it appears that\n> > the following operation is not allowed:\n> >\n> > postgres=# create table t1(c1 int, c2 int, c3 int);\n> > CREATE TABLE\n> > postgres=# Alter Table t1 replica identity full;\n> > ALTER TABLE\n> > postgres=# create publication pub1 for table t1(c1);\n> > CREATE PUBLICATION\n> >\n> > However, what is not allowed is the following:\n> > postgres=# delete from t1;\n> > ERROR: cannot delete from table \"t1\"\n> > DETAIL: Column list used by the publication does not cover the\n> > replica identity.\n> >\n> > I am not sure if we really need this line but if so then please try to\n> > make it more clear. I think the similar text is present in 0002 patch\n> > which should be modified accordingly.\n> >\n>\n> The \"Furthermore…\" sentence came from the commit message [1]. But I\n> agree it seems redundant/ambiguous, so I have removed it (and removed\n> the same in patch 0002).\n>\n\nThanks, pushed your first patch.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 2 Sep 2022 19:10:02 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Fri, Sep 2, 2022 at 11:40 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Fri, Sep 2, 2022 at 8:45 AM Peter Smith <smithpb2250@gmail.com> wrote:\n> >\n> > On Thu, Sep 1, 2022 at 7:53 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > >\n> > > On Fri, Aug 26, 2022 at 7:33 AM Peter Smith <smithpb2250@gmail.com> wrote:\n> > > >\n> > >\n> > > Few comments on both the patches:\n> > > v4-0001*\n> > > =========\n> > > 1.\n> > > Furthermore, if the table uses\n> > > + <literal>REPLICA IDENTITY FULL</literal>, specifying a column list is not\n> > > + allowed (it will cause publication errors for <command>UPDATE</command> or\n> > > + <command>DELETE</command> operations).\n> > >\n> > > This line sounds a bit unclear to me. 
From this like it appears that\n> > > the following operation is not allowed:\n> > >\n> > > postgres=# create table t1(c1 int, c2 int, c3 int);\n> > > CREATE TABLE\n> > > postgres=# Alter Table t1 replica identity full;\n> > > ALTER TABLE\n> > > postgres=# create publication pub1 for table t1(c1);\n> > > CREATE PUBLICATION\n> > >\n> > > However, what is not allowed is the following:\n> > > postgres=# delete from t1;\n> > > ERROR: cannot delete from table \"t1\"\n> > > DETAIL: Column list used by the publication does not cover the\n> > > replica identity.\n> > >\n> > > I am not sure if we really need this line but if so then please try to\n> > > make it more clear. I think the similar text is present in 0002 patch\n> > > which should be modified accordingly.\n> > >\n> >\n> > The \"Furthermore…\" sentence came from the commit message [1]. But I\n> > agree it seems redundant/ambiguous, so I have removed it (and removed\n> > the same in patch 0002).\n> >\n>\n> Thanks, pushed your first patch.\n>\n\nThanks for the push.\n\nI have rebased the remaining patch (v6-0001 is the same as v5-0002)\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia", "msg_date": "Mon, 5 Sep 2022 10:28:11 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 5, 2022 8:28 AM Peter Smith <smithpb2250@gmail.com> wrote:\r\n> \r\n> I have rebased the remaining patch (v6-0001 is the same as v5-0002)\r\n> \r\n\r\nThanks for updating the patch. Here are some comments.\r\n\r\n1.\r\n+ the <xref linkend=\"sql-alterpublication\"/> will be successful but later\r\n+ the WalSender on the publisher, or the subscriber may throw an error. In\r\n+ this scenario, the user needs to recreate the subscription after adjusting\r\n\r\nShould \"WalSender\" be changed to \"walsender\"? 
I saw \"walsender\" is used in other\r\nplaces in the documentation.\r\n\r\n2.\r\n+test_pub=# CREATE TABLE t1(id int, a text, b text, c text, d text, e text, PRIMARY KEY(id));\r\n+CREATE TABLE\r\n+test_pub=#\r\n\r\n+test_pub=# CREATE PUBLICATION p1 FOR TABLE t1 (id, b, a, d);\r\n+CREATE PUBLICATION\r\n+test_pub=#\r\n\r\nI think the redundant \"test_pub=#\" can be removed.\r\n\r\n\r\nBesides, I tested the examples in the patch, there's no problem.\r\n\r\nRegards,\r\nShi yu\r\n", "msg_date": "Mon, 5 Sep 2022 03:42:40 +0000", "msg_from": "\"shiy.fnst@fujitsu.com\" <shiy.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 5, 2022 at 1:42 PM shiy.fnst@fujitsu.com\n<shiy.fnst@fujitsu.com> wrote:\n>\n> On Mon, Sep 5, 2022 8:28 AM Peter Smith <smithpb2250@gmail.com> wrote:\n> >\n> > I have rebased the remaining patch (v6-0001 is the same as v5-0002)\n> >\n>\n> Thanks for updating the patch. Here are some comments.\n>\n> 1.\n> + the <xref linkend=\"sql-alterpublication\"/> will be successful but later\n> + the WalSender on the publisher, or the subscriber may throw an error. In\n> + this scenario, the user needs to recreate the subscription after adjusting\n>\n> Should \"WalSender\" be changed to \"walsender\"? 
I saw \"walsender\" is used in other\n> places in the documentation.\n\nModified.\n\n>\n> 2.\n> +test_pub=# CREATE TABLE t1(id int, a text, b text, c text, d text, e text, PRIMARY KEY(id));\n> +CREATE TABLE\n> +test_pub=#\n>\n> +test_pub=# CREATE PUBLICATION p1 FOR TABLE t1 (id, b, a, d);\n> +CREATE PUBLICATION\n> +test_pub=#\n>\n> I think the redundant \"test_pub=#\" can be removed.\n>\n\nModified.\n\n>\n> Besides, I tested the examples in the patch, there's no problem.\n>\n\nThanks for the review comments, and testing.\n\nI made both fixes as suggested.\n\nPSA v7.\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia", "msg_date": "Mon, 5 Sep 2022 20:16:09 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 5, 2022 at 3:46 PM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n>\n> PSA v7.\n>\n\nFor example, if additional columns are added to the table, then\n+ (after a <literal>REFRESH PUBLICATION</literal>) if there was a column list\n+ only those named columns will continue to be replicated.\n\nThis looks a bit unclear to me w.r.t the refresh publication step. Why\nexactly you have used refresh publication in the above para? It is\nused to add new tables if any added to the publication, so not clear\nto me how it helps in this case. 
If that is not required then we can\nchange it to: \"For example, if additional columns are added to the\ntable then only those named columns mentioned in the column list will\ncontinue to be replicated.\"\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 5 Sep 2022 16:16:10 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Mon, Sep 5, 2022 at 8:46 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Mon, Sep 5, 2022 at 3:46 PM Peter Smith <smithpb2250@gmail.com> wrote:\n> >\n> >\n> > PSA v7.\n> >\n>\n> For example, if additional columns are added to the table, then\n> + (after a <literal>REFRESH PUBLICATION</literal>) if there was a column list\n> + only those named columns will continue to be replicated.\n>\n> This looks a bit unclear to me w.r.t the refresh publication step. Why\n> exactly you have used refresh publication in the above para? It is\n> used to add new tables if any added to the publication, so not clear\n> to me how it helps in this case. If that is not required then we can\n> change it to: \"For example, if additional columns are added to the\n> table then only those named columns mentioned in the column list will\n> continue to be replicated.\"\n>\n\nYou are right - that REFRESH PUBLICATION was not necessary for this\nexample. The patch is modified to use your suggested text.\n\nPSA v8\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia", "msg_date": "Tue, 6 Sep 2022 09:37:45 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Sep 6, 2022 at 5:08 AM Peter Smith <smithpb2250@gmail.com> wrote:\n>\n> You are right - that REFRESH PUBLICATION was not necessary for this\n> example. The patch is modified to use your suggested text.\n>\n> PSA v8\n>\n\nLGTM. 
I'll push this once the tag appears for v15.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Tue, 6 Sep 2022 14:15:22 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Tue, Sep 6, 2022 at 2:15 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Tue, Sep 6, 2022 at 5:08 AM Peter Smith <smithpb2250@gmail.com> wrote:\n> >\n> > You are right - that REFRESH PUBLICATION was not necessary for this\n> > example. The patch is modified to use your suggested text.\n> >\n> > PSA v8\n> >\n>\n> LGTM. I'll push this once the tag appears for v15.\n>\n\nPushed!\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 7 Sep 2022 16:18:48 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" }, { "msg_contents": "On Wed, Sep 7, 2022 at 8:49 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Tue, Sep 6, 2022 at 2:15 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >\n> > On Tue, Sep 6, 2022 at 5:08 AM Peter Smith <smithpb2250@gmail.com> wrote:\n> > >\n> > > You are right - that REFRESH PUBLICATION was not necessary for this\n> > > example. The patch is modified to use your suggested text.\n> > >\n> > > PSA v8\n> > >\n> >\n> > LGTM. I'll push this once the tag appears for v15.\n> >\n>\n> Pushed!\n\nThanks for pushing.\n\n------\nKind Regards,\nPeter Smith.\nFujitsu Australia.\n\n\n", "msg_date": "Thu, 8 Sep 2022 08:31:52 +1000", "msg_from": "Peter Smith <smithpb2250@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Column Filtering in Logical Replication" } ]
[ { "msg_contents": "The Core Team would like to extend our congratulations to\nDaniel Gustafsson and John Naylor, who have accepted invitations\nto become our newest Postgres committers.\n\nPlease join me in wishing them much success and few bugs.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 30 Jun 2021 16:43:58 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "On Wed, Jun 30, 2021 at 04:43:58PM -0400, Tom Lane wrote:\n> The Core Team would like to extend our congratulations to\n> Daniel Gustafsson and John Naylor, who have accepted invitations\n> to become our newest Postgres committers.\n> \n> Please join me in wishing them much success and few bugs.\n\nCongratulations to Daniel and John! May the future not turn the\nbuildfarm red.\n--\nMichael", "msg_date": "Thu, 1 Jul 2021 06:48:39 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "On Thu, 1 Jul 2021 at 09:48, Michael Paquier <michael@paquier.xyz> wrote:\n> Congratulations to Daniel and John!\n\n+1. Well deserved!\n\nDavid\n\n\n", "msg_date": "Thu, 1 Jul 2021 10:47:41 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "On Wed, Jun 30, 2021 at 11:44 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> The Core Team would like to extend our congratulations to\n> Daniel Gustafsson and John Naylor, who have accepted invitations\n> to become our newest Postgres committers.\n>\n> Please join me in wishing them much success and few bugs.\n\nCongratulations to Daniel and John! 
Well deserved promotion!\n\n------\nRegards,\nAlexander Korotkov\n\n\n", "msg_date": "Thu, 1 Jul 2021 02:12:41 +0300", "msg_from": "Alexander Korotkov <aekorotkov@gmail.com>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "\nOn 6/30/21 4:43 PM, Tom Lane wrote:\n> The Core Team would like to extend our congratulations to\n> Daniel Gustafsson and John Naylor, who have accepted invitations\n> to become our newest Postgres committers.\n>\n> Please join me in wishing them much success and few bugs.\n>\n> \t\t\t\n\n\nWell deserved, welcome to both.\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Wed, 30 Jun 2021 19:13:16 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "On Thu, Jul 1, 2021 at 2:14 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> The Core Team would like to extend our congratulations to\n> Daniel Gustafsson and John Naylor, who have accepted invitations\n> to become our newest Postgres committers.\n>\n\nMany congratulations to Daniel and John!\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 1 Jul 2021 08:39:10 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "On Thu, Jul 1, 2021 at 2:14 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> The Core Team would like to extend our congratulations to\n> Daniel Gustafsson and John Naylor, who have accepted invitations\n> to become our newest Postgres committers.\n>\n\nMany congratulations to Daniel & John !\n\nRegards,\nAmul\n\n\n", "msg_date": "Thu, 1 Jul 2021 09:20:42 +0530", "msg_from": "Amul Sul <sulamul@gmail.com>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "Le mer. 
30 juin 2021 à 22:44, Tom Lane <tgl@sss.pgh.pa.us> a écrit :\n\n> The Core Team would like to extend our congratulations to\n> Daniel Gustafsson and John Naylor, who have accepted invitations\n> to become our newest Postgres committers.\n>\n> Please join me in wishing them much success and few bugs.\n>\n>\nCongrats to both.\n\nI guess https://wiki.postgresql.org/wiki/Committers needs to be updated ;)\n\n\n-- \nGuillaume.\n\nLe mer. 30 juin 2021 à 22:44, Tom Lane <tgl@sss.pgh.pa.us> a écrit :The Core Team would like to extend our congratulations to\nDaniel Gustafsson and John Naylor, who have accepted invitations\nto become our newest Postgres committers.\n\nPlease join me in wishing them much success and few bugs.\nCongrats to both.I guess https://wiki.postgresql.org/wiki/Committers needs to be updated ;)-- Guillaume.", "msg_date": "Thu, 1 Jul 2021 09:05:58 +0200", "msg_from": "Guillaume Lelarge <guillaume@lelarge.info>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "\n\n> 1 июля 2021 г., в 01:43, Tom Lane <tgl@sss.pgh.pa.us> написал(а):\n> \n> The Core Team would like to extend our congratulations to\n> Daniel Gustafsson and John Naylor, who have accepted invitations\n> to become our newest Postgres committers.\n> \n> Please join me in wishing them much success and few bugs.\n\nCongratulations to Daniel and John! Many features with few bugs :)\n\nBest regards, Andrey Borodin.\n\n", "msg_date": "Thu, 1 Jul 2021 12:14:08 +0500", "msg_from": "Andrey Borodin <x4mmm@yandex-team.ru>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "On Thu, Jul 1, 2021 at 9:06 AM Guillaume Lelarge <guillaume@lelarge.info> wrote:\n>\n> Le mer. 
30 juin 2021 à 22:44, Tom Lane <tgl@sss.pgh.pa.us> a écrit :\n>>\n>> The Core Team would like to extend our congratulations to\n>> Daniel Gustafsson and John Naylor, who have accepted invitations\n>> to become our newest Postgres committers.\n>>\n>> Please join me in wishing them much success and few bugs.\n>>\n>\n> Congrats to both.\n>\n> I guess https://wiki.postgresql.org/wiki/Committers needs to be updated ;)\n\nWe have a rather long checklist to work through, and that one is near\nthe end - so we'll get there. But thanks for the reminder to check\nthat it was on the checklist :)\n\n-- \n Magnus Hagander\n Me: https://www.hagander.net/\n Work: https://www.redpill-linpro.com/\n\n\n", "msg_date": "Thu, 1 Jul 2021 09:29:57 +0200", "msg_from": "Magnus Hagander <magnus@hagander.net>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "On Wed, Jun 30, 2021 at 04:43:58PM -0400, Tom Lane wrote:\n> The Core Team would like to extend our congratulations to\n> Daniel Gustafsson and John Naylor, who have accepted invitations\n> to become our newest Postgres committers.\n> \n> Please join me in wishing them much success and few bugs.\n> \n\nCongrats Daniel and John. 
Keep your good work!\n\n-- \nJaime Casanova\nDirector de Servicios Profesionales\nSystemGuards - Consultores de PostgreSQL\n\n\n", "msg_date": "Thu, 1 Jul 2021 09:52:57 -0500", "msg_from": "Jaime Casanova <jcasanov@systemguards.com.ec>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "On 6/30/21 4:43 PM, Tom Lane wrote:\n> The Core Team would like to extend our congratulations to\n> Daniel Gustafsson and John Naylor, who have accepted invitations\n> to become our newest Postgres committers.\n>\n> Please join me in wishing them much success and few bugs.\n>\n> \t\t\tregards, tom lane\n\n\nCongrats to you both !\n\n\nBest regards,\n\n  Jesper\n\n\n\n\n", "msg_date": "Thu, 1 Jul 2021 10:54:28 -0400", "msg_from": "Jesper Pedersen <jesper.pedersen@redhat.com>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" }, { "msg_contents": "On Thu, Jul 1, 2021 at 11:54 PM Jesper Pedersen\n<jesper.pedersen@redhat.com> wrote:\n> On 6/30/21 4:43 PM, Tom Lane wrote:\n> > The Core Team would like to extend our congratulations to\n> > Daniel Gustafsson and John Naylor, who have accepted invitations\n> > to become our newest Postgres committers.\n> >\n> > Please join me in wishing them much success and few bugs.\n> >\n> > regards, tom lane\n>\n> Congrats to you both !\n\n+1, congrats Daniel & John!\n\n-- \nAmit Langote\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 2 Jul 2021 09:47:32 +0900", "msg_from": "Amit Langote <amitlangote09@gmail.com>", "msg_from_op": false, "msg_subject": "Re: New committers: Daniel Gustafsson and John Naylor" } ]
[ { "msg_contents": "(Re-sending this email, because the Commitfest app mistakenly [3]\nconsidered previous email [4] to be part of the old thread, whereas it\nshould not be considered that way)\n\nI came across this thread [1] to disallow canceling a transaction not\nyet confirmed by a synchronous replica. I think my proposed patch\nmight help that case as well, hence adding all involved in that thread\nto BCC, for one-time notification.\n\nAs mentioned in that thread, when sending a cancellation signal, the\nclient cannot be sure if the cancel signal was honored, and if the\ntransaction was cancelled successfully. In the attached patch, the\nbackend emits a NotificationResponse containing the current full\ntransaction id. It does so only if the relevant GUC is enabled, and\nwhen the top-transaction is being assigned the ID.\n\nThis information can be useful to the client, when:\ni) it wants to cancel a transaction _after_ issuing a COMMIT, and\nii) it wants to check the status of its transaction that it sent\nCOMMIT for, but never received a response (perhaps because the server\ncrashed).\n\nAdditionally, this information can be useful for middleware, like\nTransaction Processing Monitors, which can now transparently (without\nany change in application code) monitor the status of transactions (by\nwatching for the transaction status indicator in the ReadyForQuery\nprotocol message). They can use the transaction ID from the\nNotificationResponse to open a watcher, and on seeing either an 'E' or\n'I' payload in subsequent ReadyForQuery messages, close the watcher.\nOn server crash, or other adverse events, they can then use the\ntransaction IDs still being watched to check status of those\ntransactions, and take appropriate actions, e.g. retry any aborted\ntransactions.\n\nWe cannot use the elog() mechanism for this notification because it is\nsensitive to the value of client_min_messages. Hence I used the NOTIFY\ninfrastructure for this message. 
I understand that this usage violates\nsome expectations as to how NOTIFY messages are supposed to behave\n(see [2] below), but I think these are acceptable violations; open to\nhearing if/why this might not be acceptable, and any possible\nalternatives.\n\nI'm not very familiar with the parallel workers infrastructure, so the\npatch is missing any consideration for those.\n\nReviews welcome.\n\n[1]: subject was: Re: Disallow cancellation of waiting for synchronous\nreplication\nthread: https://www.postgresql.org/message-id/flat/C1F7905E-5DB2-497D-ABCC-E14D4DEE506C%40yandex-team.ru\n\n[2]:\n At present, NotificationResponse can only be sent outside a\n transaction, and thus it will not occur in the middle of a\n command-response series, though it might occur just before ReadyForQuery.\n It is unwise to design frontend logic that assumes that, however.\n Good practice is to be able to accept NotificationResponse at any\n point in the protocol.\n\n[3]:\n\nSee Emails section in https://commitfest.postgresql.org/33/3198/\n\nThe email [4] is considered a continuation of a previous thread, _and_\nthe 'Latest attachment' entry points to a different email, even though\nmy email [4] contained a patch.\n\n[4]: https://www.postgresql.org/message-id/CABwTF4VS+HVm11XRE_Yv0vGmG=5kpYdx759RyJEp9F+fiLTU=Q@mail.gmail.com\n\nBest regards,\n--\nGurjeet Singh http://gurjeet.singh.im/\n\n\n", "msg_date": "Wed, 30 Jun 2021 17:56:42 -0700", "msg_from": "Gurjeet Singh <gurjeet@singh.im>", "msg_from_op": true, "msg_subject": "Automatic notification of top transaction IDs" }, { "msg_contents": "The proposed patch is attached.\n\nBest regards,\n--\nGurjeet Singh http://gurjeet.singh.im/\n\nOn Wed, Jun 30, 2021 at 5:56 PM Gurjeet Singh <gurjeet@singh.im> wrote:\n>\n> (Re-sending this email, because the Commitfest app mistakenly [3]\n> considered previous email [4] to be part of the old thread, whereas it\n> should not be considered that way)\n>\n> I came across this thread [1] to disallow 
canceling a transaction not\n> yet confirmed by a synchronous replica. I think my proposed patch\n> might help that case as well, hence adding all involved in that thread\n> to BCC, for one-time notification.\n>\n> As mentioned in that thread, when sending a cancellation signal, the\n> client cannot be sure if the cancel signal was honored, and if the\n> transaction was cancelled successfully. In the attached patch, the\n> backend emits a NotificationResponse containing the current full\n> transaction id. It does so only if the relevant GUC is enabled, and\n> when the top-transaction is being assigned the ID.\n>\n> This information can be useful to the client, when:\n> i) it wants to cancel a transaction _after_ issuing a COMMIT, and\n> ii) it wants to check the status of its transaction that it sent\n> COMMIT for, but never received a response (perhaps because the server\n> crashed).\n>\n> Additionally, this information can be useful for middleware, like\n> Transaction Processing Monitors, which can now transparently (without\n> any change in application code) monitor the status of transactions (by\n> watching for the transaction status indicator in the ReadyForQuery\n> protocol message). They can use the transaction ID from the\n> NotificationResponse to open a watcher, and on seeing either an 'E' or\n> 'I' payload in subsequent ReadyForQuery messages, close the watcher.\n> On server crash, or other adverse events, they can then use the\n> transaction IDs still being watched to check status of those\n> transactions, and take appropriate actions, e.g. retry any aborted\n> transactions.\n>\n> We cannot use the elog() mechanism for this notification because it is\n> sensitive to the value of client_min_messages. Hence I used the NOTIFY\n> infrastructure for this message. 
I understand that this usage violates\n> some expectations as to how NOTIFY messages are supposed to behave\n> (see [2] below), but I think these are acceptable violations; open to\n> hearing if/why this might not be acceptable, and any possible\n> alternatives.\n>\n> I'm not very familiar with the parallel workers infrastructure, so the\n> patch is missing any consideration for those.\n>\n> Reviews welcome.\n>\n> [1]: subject was: Re: Disallow cancellation of waiting for synchronous\n> replication\n> thread: https://www.postgresql.org/message-id/flat/C1F7905E-5DB2-497D-ABCC-E14D4DEE506C%40yandex-team.ru\n>\n> [2]:\n> At present, NotificationResponse can only be sent outside a\n> transaction, and thus it will not occur in the middle of a\n> command-response series, though it might occur just before ReadyForQuery.\n> It is unwise to design frontend logic that assumes that, however.\n> Good practice is to be able to accept NotificationResponse at any\n> point in the protocol.\n>\n> [3]:\n>\n> See Emails section in https://commitfest.postgresql.org/33/3198/\n>\n> The email [4] is considered a continuation of a previous thread, _and_\n> the 'Latest attachment' entry points to a different email, even though\n> my email [4] contained a patch.\n>\n> [4]: https://www.postgresql.org/message-id/CABwTF4VS+HVm11XRE_Yv0vGmG=5kpYdx759RyJEp9F+fiLTU=Q@mail.gmail.com\n>\n> Best regards,\n> --\n> Gurjeet Singh http://gurjeet.singh.im/", "msg_date": "Wed, 30 Jun 2021 18:11:19 -0700", "msg_from": "Gurjeet Singh <gurjeet@singh.im>", "msg_from_op": true, "msg_subject": "Re: Automatic notification of top transaction IDs" }, { "msg_contents": "On Thu, Jul 1, 2021 at 6:41 AM Gurjeet Singh <gurjeet@singh.im> wrote:\n>\n> The proposed patch is attached.\n\nThere is one compilation warning:\nxid.c:165:1: warning: no previous prototype for\n‘FullTransactionIdToStr’ [-Wmissing-prototypes]\n 165 | FullTransactionIdToStr(FullTransactionId fxid)\n | ^~~~~~~~~~~~~~~~~~~~~~\n\nThere are few 
compilation issues in documentation:\n/usr/bin/xmllint --path . --noout --valid postgres.sgml\nprotocol.sgml:1327: parser error : Opening and ending tag mismatch:\nliteral line 1322 and para\n </para>\n ^\nprotocol.sgml:1339: parser error : Opening and ending tag mismatch:\nliteral line 1322 and sect2\n </sect2>\n ^\nprotocol.sgml:1581: parser error : Opening and ending tag mismatch:\npara line 1322 and sect1\n </sect1>\n ^\nprotocol.sgml:7893: parser error : Opening and ending tag mismatch:\nsect2 line 1322 and chapter\n</chapter>\n ^\nprotocol.sgml:7894: parser error : chunk is not well balanced\n\n^\npostgres.sgml:253: parser error : Failure to process entity protocol\n &protocol;\n ^\npostgres.sgml:253: parser error : Entity 'protocol' not defined\n &protocol;\n ^\n\nRegards,\nVignesh\n\n\n", "msg_date": "Thu, 22 Jul 2021 10:58:12 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Automatic notification of top transaction IDs" }, { "msg_contents": "Greetings,\n\nI simply tested it and it works well. But I got a compilation warning,\nshould we move the definition of function FullTransactionIdToStr to the\n\"transam.h\"?\n\n-- \nThere is no royal road to learning.\nHighGo Software Co.\n\nGreetings, I simply tested it and it works well. 
But I got a compilation warning, should we move the definition of function FullTransactionIdToStr to the \"transam.h\"?-- There is no royal road to learning.HighGo Software Co.", "msg_date": "Tue, 27 Jul 2021 17:18:01 +0800", "msg_from": "Neil Chen <carpenter.nail.cz@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Automatic notification of top transaction IDs" }, { "msg_contents": "The following review has been posted through the commitfest application:\nmake installcheck-world: tested, passed\nImplements feature: tested, passed\nSpec compliant: tested, passed\nDocumentation: tested, passed\n\nHello\r\n\r\nThis patch applies fine to master branch and the regression tests are passing. \r\n\r\nRegarding the parallel worker case, the AssignTransactionId() function is already handling that and it will error out if IsParallelWorker() is true. In a normal case, this function is only called by the main backend, and the parallel workers will synchronize the transaction ID when they are spawned and they will not call this function anyway.\r\n\r\nthank you\r\n\r\nCary Huang\r\n----------------\r\nHighGo Software Canada\r\nwww.highgo.ca", "msg_date": "Fri, 17 Sep 2021 20:40:47 +0000", "msg_from": "Cary Huang <cary.huang@highgo.ca>", "msg_from_op": false, "msg_subject": "Re: Automatic notification of top transaction IDs" }, { "msg_contents": "> On 22 Jul 2021, at 07:28, vignesh C <vignesh21@gmail.com> wrote:\n> \n> On Thu, Jul 1, 2021 at 6:41 AM Gurjeet Singh <gurjeet@singh.im> wrote:\n>> \n>> The proposed patch is attached.\n> \n> There is one compilation warning:\n> xid.c:165:1: warning: no previous prototype for\n> ‘FullTransactionIdToStr’ [-Wmissing-prototypes]\n> 165 | FullTransactionIdToStr(FullTransactionId fxid)\n> | ^~~~~~~~~~~~~~~~~~~~~~\n> \n> There are few compilation issues in documentation:\n> /usr/bin/xmllint --path . 
--noout --valid postgres.sgml\n> protocol.sgml:1327: parser error : Opening and ending tag mismatch:\n> literal line 1322 and para\n> </para>\n> ^\n> protocol.sgml:1339: parser error : Opening and ending tag mismatch:\n> literal line 1322 and sect2\n> </sect2>\n> ^\n> protocol.sgml:1581: parser error : Opening and ending tag mismatch:\n> para line 1322 and sect1\n> </sect1>\n> ^\n> protocol.sgml:7893: parser error : Opening and ending tag mismatch:\n> sect2 line 1322 and chapter\n> </chapter>\n> ^\n> protocol.sgml:7894: parser error : chunk is not well balanced\n> \n> ^\n> postgres.sgml:253: parser error : Failure to process entity protocol\n> &protocol;\n> ^\n> postgres.sgml:253: parser error : Entity 'protocol' not defined\n> &protocol;\n> ^\n\nThe above compiler warning and documentation compilation errors haven't been\naddressed still, so I'm marking this patch Returned with Feedback. Please feel\nfree to open a new entry for an updated patch.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Thu, 4 Nov 2021 14:05:30 +0100", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Automatic notification of top transaction IDs" }, { "msg_contents": "On Wed, Jun 30, 2021 at 8:56 PM Gurjeet Singh <gurjeet@singh.im> wrote:\n> As mentioned in that thread, when sending a cancellation signal, the\n> client cannot be sure if the cancel signal was honored, and if the\n> transaction was cancelled successfully. In the attached patch, the\n> backend emits a NotificationResponse containing the current full\n> transaction id. It does so only if the relevant GUC is enabled, and\n> when the top-transaction is being assigned the ID.\n\nThere's nothing to keep a client that wants this information from just\nusing SELECT txid_current() to get it, so this doesn't really seem\nworth it to me. 
It's true that it could be convenient for someone not\nto need to issue an SQL query to get the information and instead just\nget it automatically, but I don't think that minor convenience is\nenough to justify a new feature of this type.\n\nAlso, your 8-line documentation changes contains two spelling\nmistakes, and you've used // comments which are not project style in\ntwo places. It's a good idea to check over your patches for such\nsimple mistakes before submitting them.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Thu, 4 Nov 2021 11:01:26 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Automatic notification of top transaction IDs" } ]
[ { "msg_contents": "Attached is a combined diff for a set of related patches to the built-in\npgbench workloads. One commit adds an INSERT workload. One fixes the long\nstanding 0 length filler issue. A new --extra-indexes option adds the\nindexes needed for lookups added by the --foreign-keys option.\n\nThe commits are independent but overlap in goals. I'm grouping them here\nmainly to consolidate this message, covering the feedback leading to this\nparticular combination plus a first review from me. More graphs etc.\ncoming as my pgbench toolchain settles down again.\n\nCode all by David Christensen based on vague specs from me, errors probably\nmine, changes are also at\nhttps://github.com/pgguru/postgres/commits/pgbench-improvements David ran\nthrough the pgbench TAP regression tests and we're thinking about how to\nadd more for changes like this. Long term that collides with performance\ntesting for things like CREATE INDEX, which I've done some work on myself\nrecently in pgbench-tools.\n\nAfter bouncing the possibilities around a little, David and I thought this\nspecific set of changes might be the right amount of change for one PG\nversion. Core development could bite on all these pgbench changes or even\nmore [foreshadowing] as part of a themed rework of pgbench's workload\nthat's known to adjust results a bit, so beware direct comparisons to old\nversions. That's what I'd prefer to do, a break it all at once strategy\nfor these items and whatever else we can dig up this cycle. I'll do my\nusual thing to help with that, starting with more benchmark graphs of this\npatch and such once my pgbench toolchain settles again.\n\nTo me pgbench should continue to demonstrate good PostgreSQL client\nbehavior, and all this is just modernizing polish. Row size and indexing\nmatter of course, but none of these changes really alter the fundamentals\nof pgbench results. 
With modern hardware acceleration, the performance\ndrag due to the increased size of the filler is so much further down in the\nbenchmark noise from where I started at with PG. The $750 USD AMD retail\nchip in my basement lab pushes 1M TPS of prepared SELECT statements over\nsockets. Plus or minus 84 bytes per row in a benchmark database doesn't\nworry me so much anymore. Seems down there with JSON overhead as a lost\nmicro optimization fight nowadays.\n\n# Background: pgbench vs. sysbench\n\nThis whole rework idea came from a performance review pass where I compared\npgbench and sysbench again, as both have evolved a good bit since my last\ncomparison. All of the software defined storage testing brewing right now\nis shining a brighter light on both tools lately than I've seen in a while.\n\nThe goal I worked on a bit (with Joe Conway and RedHat, thank you to our\nsponsors) was how to make both tools closer to equal when performing\nsimilar tasks. pgbench can duplicate the basics of the sysbench OLTP\nworkload easily enough, running custom pgbench scripts against the\ngenerated pgbench_accounts and/or the initially empty pgbench_history. Joe\nand I did some work on sysbench to improve its error handling to where it\nreconnected automatically as part of that. How to add a reconnection\nfeature to pgbench is a struggle because of where it fits between PG's\ntypical connection and connection pooler abstractions; different story than\nthis one. sysbench had the basics and just needed some error handling bug\nfixes, which might even have made their way upstream. These three patches\nare the changes I thought core PG could use in parallel, as a mix of\ncorrectness, new features, and fair play in benchmarking.\n\n# INSERT workload\n\nThe easiest way to measure the basic COMMIT overhead of network storage is\nby doing an INSERT into an empty database and seeing the latency. 
I've\nbeen doing that regularly since 9.1 added sync rep and that was the easiest\nway to test client scaling. From my perspective as an old CRUD app writer,\ncreating a row is the main interesting operation that's not already\navailable in pgbench. (No one has a DELETE heavy workload for very long)\n\nSome chunk of pgbench users are trying to do that job now using the\nbuilt-ins, and none of the options fit well. Anything that touches the\naccounts table becomes heavily wrapped into the checkpoint cycle, and\nextracting signal from checkpoint noise is so hard dudes charge for books\nabout it. In this context I trust INSERT results more than I do the output\nfrom pg_test_fsync, which is too low level for me to recommend as a\ngeneral-purpose tool.\n\nFor better or worse pgbench is a primary tool in that role to PG customers,\nand the INSERT scaling looks great all over. I've attached an early sample\ncomparing 5 models of SSD to show it; what looks like a PG14 regression\nthere is a testing artifact I'm working on.\n\nThe INSERT workload is useful with or without the history indexes, which\nagain as written here only are created if you ask for the FKs. When I do\nthese performance studies of INSERT scaling as a new history table builds,\nfor really no good reason other than my curiosity, the slowdowns from\nwhether the pgbench_history has keys on it seem like basic primary key\noverhead to me.\n\n# FK indexes\n\nThe new extra index set always appears if you turn on FKs after this\nchange. Then there's also the original path to turn on the indexes but not\nthe FKs.\n\nAs I don't consider the use case of FKs without indexes to exist in the\nwild, I was surprised at the current state of things, that you could even\nhave FKs but not the associated indexes. I have not RTFA for it but I'd\nwager it's been brought up before. 
In that case, +1 from me and David for\nthis patch's view of database correctness I guess.\n\nOn a fresh pgbench database, the history table is empty and only the\naccounts table has serious size to it. Adding indexes to the other tables,\nlike this patch does, has light overhead during the creation cycle.\n\nMy take on INSERT/UPDATE workloads that once you're hitting disk and have\nWAL changes, whether one or three index blocks are touched each time on the\nsmall tables is so much more of a checkpoint problem than anything else.\nThe overhead these new indexes add should be in the noise of the standard\npgbench \"TPC-B (sort of)\" workload.\n\nThe index overhead only gets substantial once you've run pgbench long\nenough that history has some size to it. The tiny slice of people using\npgbench for long-term simulation--which might only be me--are surely\nsophisticated enough to deal with index overhead increasing from zero to\nnormal primary key index overhead.\n\nI personally would prefer to see pgbench lead by example here, that tables\nrelated this way should be indexed with FKs by default, as the Right Way to\ndo such things. There's a slow deprecation plan leading that way possible\nfrom here. This patch set adds options to add those indexes, and slowly\nthose options could become the defaults. Or there's the break it all at\nonce and the FK+Index path is the new default path forward, and users would\nhave to turn it off if they want to reduce overhead.\n\n# filler\n\nEvery few years a customer I deal with discovers pgbench's generated tables\ndon't really fill its filler column. I think on modern hardware it's time\nto pay for that fully, as not as scary of a performance regression.\n memcpy() is AVX accelerated for me on Linux now; it's not the old C\nstandard library doing the block work. 
When I field detailed questions\nabout the filler, why it's length is 0, how the problem was introduced, and\nwhy it was never fixed before, it's not the best look.\n\n From port 5432 you can identify if a patched pgbench client created the\ndatabase like this:\n\n pgbench# SELECT length(filler) FROM pgbench_accounts LIMIT 1;\n length | 84\n\nThat is 0 in HEAD. I'd really prefer not to have to pause and explain this\nfiller thing again. It looks a little too much like benchmark mischief for\nmy comfort, which the whole sysbench comparison really highlighted again.", "msg_date": "Wed, 30 Jun 2021 23:18:41 -0400", "msg_from": "Gregory Smith <gregsmithpgsql@gmail.com>", "msg_from_op": true, "msg_subject": "pgbench: INSERT workload, FK indexes, filler fix" }, { "msg_contents": "\nHello Greg,\n\nSome quick feedback about the patch and the arguments.\n\nFilling: having an empty string/NULL has been bothering me for some time. \nHowever there is a significant impact on the client/server network stream \nwhile initializing or running queries, which means that pgbench older \nperformance report would be comparable to newer ones, which is a pain even \nif the new results do make sense, as you noted in a comment. I'm okay with \nbreaking that, but it would require a consensus: People would run pgbench \non a previous install, upgrade, run pgbench again, and report a massive \nperformance regression. Who will have to deal with that noise?\n\nA work around could be to add new workloads with different names, and let \nthe previous workloads more or less as is.\n\n\"--insert-only\" as a short hand for \"-b insert-only\": I do not think this \nis really needed to save 1 char. Also note that \"-b i\" would probably \nwork.\n\nextra indexes: I'm ok on principle. Do we want an option for that though? \nIsn't adding \"i\" to -I enough? 
Also I do not like much the code which \nmodifies the -I provided string to add a \"i\".\n\n> After bouncing the possibilities around a little, David and I thought this\n> specific set of changes might be the right amount of change for one PG\n> version.\n\nHmmm. I was hoping for more changes:-) Eg the current error handling patch \nwould be great.\n\n> benchmark noise from where I started at with PG. The $750 USD AMD retail\n> chip in my basement lab pushes 1M TPS of prepared SELECT statements over\n> sockets. Plus or minus 84 bytes per row in a benchmark database doesn't\n> worry me so much anymore.\n\nAFAICR the space is actually allocated by pg and filled with blanks, just \nnot transfered by the protocol? For an actual network connection I guess \nthe effect should be quite noticeable.\n\n> [...]\n> I personally would prefer to see pgbench lead by example here, that tables\n> related this way should be indexed with FKs by default, as the Right Way to\n> do such things.\n\nI do agree that the default should be the good choices, and that some \nmanual effort should be done to get the bad ones. The only issue is that \npeople do not like change.\n\n-- \nFabien.\n\n\n", "msg_date": "Thu, 1 Jul 2021 12:47:57 +0200 (CEST)", "msg_from": "Fabien COELHO <coelho@cri.ensmp.fr>", "msg_from_op": false, "msg_subject": "Re: pgbench: INSERT workload, FK indexes, filler fix" }, { "msg_contents": "\nFabien COELHO writes:\n\n> Hello Greg,\n>\n> Some quick feedback about the patch and the arguments.\n>\n> Filling: having an empty string/NULL has been bothering me for some time. However there is a\n> significant impact on the client/server network stream while initializing or running queries, which\n> means that pgbench older performance report would be comparable to newer ones, which is a pain even \n> if the new results do make sense, as you noted in a comment. 
I'm okay with breaking that, but it\n> would require a consensus: People would run pgbench on a previous install, upgrade, run pgbench\n> again, and report a massive performance regression. Who will have to deal with that noise?\n\nI agree that it is a behavior change, but \"filler\" that literally includes nothing but a NULL bitmap\nor minimal-length column isn't really measuring what it sets out to measure, so to me it seems like\nwe need to bite the bullet and just start doing what we claim to already be doing; this is something\nthat has been inaccurate for a long time, and continuing to keep it inaccurate in the name of\nconsistency seems to be the wrong tack to take here. (My argument to the group at large, not you\nspecifically.)\n\nI assume that we will need to include a big note in the documentation about the behavior change,\nperhaps even a note in the output of pgbench itself; the \"right\" answer can be bikeshedded about.\n\n> A work around could be to add new workloads with different names, and let the previous workloads\n> more or less as is.\n\nYou're basically suggesting \"tpcb-like-traditional\" and \"tcpb-like-actual\"? :-) I guess that would\nbe an approach of sorts, though more than one of the built-ins needed to change in this, and I\nquestion how useful expanding these workloads will be.\n\n> \"--insert-only\" as a short hand for \"-b insert-only\": I do not think this is really needed to save 1\n> char. Also note that \"-b i\" would probably work.\n\nFair; I was just mirroring the existing structure.\n\n> extra indexes: I'm ok on principle. Do we want an option for that though? Isn't adding \"i\" to -I\n> enough? 
Also I do not like much the code which modifies the -I provided string to add a \"i\".\n\nTo me it seems disingenuous to setup a situation where you'd have FKs with no indexes, which is why\nI'd added that modification; unless you're talking anout something different?\n\n>> After bouncing the possibilities around a little, David and I thought this\n>> specific set of changes might be the right amount of change for one PG\n>> version.\n>\n> Hmmm. I was hoping for more changes:-) Eg the current error handling patch would be great.\n\nI'm happy to continue working on improving this part of the program.\n\n>> benchmark noise from where I started at with PG. The $750 USD AMD retail\n>> chip in my basement lab pushes 1M TPS of prepared SELECT statements over\n>> sockets. Plus or minus 84 bytes per row in a benchmark database doesn't\n>> worry me so much anymore.\n>\n> AFAICR the space is actually allocated by pg and filled with blanks, just not transfered by the\n> protocol? For an actual network connection I guess the effect should be quite noticeable.\n\nThis patchset included filling with actual bytes, not just padding (or implied padding via\nchar(n)). Depending on how this is invoked, it could definitely add some network overhead (though\nI'd be surprised if it pushed it over a single packet relative to the original size of the query).\n\n>> [...]\n>> I personally would prefer to see pgbench lead by example here, that tables\n>> related this way should be indexed with FKs by default, as the Right Way to\n>> do such things.\n>\n> I do agree that the default should be the good choices, and that some manual effort should be done\n> to get the bad ones. The only issue is that people do not like change.\n\nHeh, you are not wrong here. 
Hopefully we can get some consensus about this being the right way\nforward.\n\nBest,\n\nDavid\n\n\n", "msg_date": "Thu, 01 Jul 2021 10:54:02 -0500", "msg_from": "David Christensen <david.christensen@crunchydata.com>", "msg_from_op": false, "msg_subject": "Re: pgbench: INSERT workload, FK indexes, filler fix" } ]
[ { "msg_contents": "It seems like a few too many years of an SQL standard without any\nstandardised way to LIMIT the number of records in a result set caused\nvarious applications to adopt some strange ways to get this behaviour.\nOver here in the PostgreSQL world, we just type LIMIT n; at the end of\nour queries. I believe Oracle people did a few tricks with a special\ncolumn named \"rownum\". Another set of people needed SQL that would\nwork over multiple DBMSes and used something like:\n\nSELECT * FROM (SELECT ... row_number() over (order by ...) rn) a WHERE rn <= 10;\n\nI believe it's fairly common to do paging this way on commerce sites.\n\nThe problem with PostgreSQL here is that neither the planner nor\nexecutor knows that once we get to row_number 11 that we may as well\nstop. The number will never go back down in this partition.\n\nI'd like to make this better for PostgreSQL 15. I've attached a WIP\npatch to do so.\n\nHow this works is that I've added prosupport functions for each of\nrow_number(), rank() and dense_rank(). When doing qual pushdown, if\nwe happen to hit a windowing function, instead of rejecting the\npushdown, we see if there's a prosupport function and if there is, ask\nit if this qual can be used to allow us to stop emitting tuples from\nthe Window node by making use of this qual. I've called these \"run\nconditions\". Basically, keep running while this remains true. Stop\nwhen it's not.\n\nWe can't always use the qual directly. For example, if someone does.\n\nSELECT * FROM (SELECT ... row_number() over (order by ...) rn) a WHERE rn = 10;\n\nthen if we use the rn = 10 qual, we'd think we could stop right away.\nInstead, I've made the prosupport function handle this by generating a\nrn <= 10 qual so that we can stop once we get to 11. In this case we\ncannot completely pushdown the qual. It needs to remain in place to\nfilter out rn values 1-9.\n\nRow_number(), rank() and dense_rank() are all monotonically increasing\nfunctions. 
But we're not limited to just those. COUNT(*) works too\nproviding the frame bounds guarantee that the function is either\nmonotonically increasing or decreasing.\n\nCOUNT(*) OVER (ORDER BY .. ROWS BETWEEN CURRENT ROW AND UNBOUNDED\nFOLLOWING) is monotonically decreasing, whereas the standard bound\noptions would make it monotonically increasing.\n\nThe same could be done for MIN() and MAX(). I just don't think that's\nworth doing. It seems unlikely that would get enough use.\n\nAnyway. I'd like to work on this more during the PG15 cycle. I\nbelieve the attached patch makes this work ok. There are just a few\nthings to iron out.\n\n1) Unsure of the API to the prosupport function. I wonder if the\nprosupport function should just be able to say if the function is\neither monotonically increasing or decreasing or neither then have\ncore code build a qual. That would make the job of building new\nfunctions easier, but massively reduce the flexibility of the feature.\nI'm just not sure it needs to do more in the future.\n\n2) Unsure if what I've got to make EXPLAIN show the run condition is\nthe right way to do it. Because I don't want nodeWindow.c to have to\nre-evaluate the window function to determine of the run condition is\nno longer met, I've coded the qual to reference the varno in the\nwindow node's targetlist. That qual is no good for EXPLAIN so had to\ninclude another set of quals that include the WindowFunc reference. I\nsaw that Index Only Scans have a similar means to make EXPLAIN work,\nso I just followed that.\n\nDavid", "msg_date": "Thu, 1 Jul 2021 21:11:21 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Window Function \"Run Conditions\"" }, { "msg_contents": "čt 1. 7. 
2021 v 11:11 odesílatel David Rowley <dgrowleyml@gmail.com> napsal:\n\n> It seems like a few too many years of an SQL standard without any\n> standardised way to LIMIT the number of records in a result set caused\n> various applications to adopt some strange ways to get this behaviour.\n> Over here in the PostgreSQL world, we just type LIMIT n; at the end of\n> our queries. I believe Oracle people did a few tricks with a special\n> column named \"rownum\". Another set of people needed SQL that would\n> work over multiple DBMSes and used something like:\n>\n> SELECT * FROM (SELECT ... row_number() over (order by ...) rn) a WHERE rn\n> <= 10;\n>\n> I believe it's fairly common to do paging this way on commerce sites.\n>\n> The problem with PostgreSQL here is that neither the planner nor\n> executor knows that once we get to row_number 11 that we may as well\n> stop. The number will never go back down in this partition.\n>\n> I'd like to make this better for PostgreSQL 15. I've attached a WIP\n> patch to do so.\n>\n> How this works is that I've added prosupport functions for each of\n> row_number(), rank() and dense_rank(). When doing qual pushdown, if\n> we happen to hit a windowing function, instead of rejecting the\n> pushdown, we see if there's a prosupport function and if there is, ask\n> it if this qual can be used to allow us to stop emitting tuples from\n> the Window node by making use of this qual. I've called these \"run\n> conditions\". Basically, keep running while this remains true. Stop\n> when it's not.\n>\n> We can't always use the qual directly. For example, if someone does.\n>\n> SELECT * FROM (SELECT ... row_number() over (order by ...) rn) a WHERE rn\n> = 10;\n>\n> then if we use the rn = 10 qual, we'd think we could stop right away.\n> Instead, I've made the prosupport function handle this by generating a\n> rn <= 10 qual so that we can stop once we get to 11. In this case we\n> cannot completely pushdown the qual. 
It needs to remain in place to\n> filter out rn values 1-9.\n>\n> Row_number(), rank() and dense_rank() are all monotonically increasing\n> functions. But we're not limited to just those. COUNT(*) works too\n> providing the frame bounds guarantee that the function is either\n> monotonically increasing or decreasing.\n>\n> COUNT(*) OVER (ORDER BY .. ROWS BETWEEN CURRENT ROW AND UNBOUNDED\n> FOLLOWING) is monotonically decreasing, whereas the standard bound\n> options would make it monotonically increasing.\n>\n> The same could be done for MIN() and MAX(). I just don't think that's\n> worth doing. It seems unlikely that would get enough use.\n>\n> Anyway. I'd like to work on this more during the PG15 cycle. I\n> believe the attached patch makes this work ok. There are just a few\n> things to iron out.\n>\n> 1) Unsure of the API to the prosupport function. I wonder if the\n> prosupport function should just be able to say if the function is\n> either monotonically increasing or decreasing or neither then have\n> core code build a qual. That would make the job of building new\n> functions easier, but massively reduce the flexibility of the feature.\n> I'm just not sure it needs to do more in the future.\n>\n> 2) Unsure if what I've got to make EXPLAIN show the run condition is\n> the right way to do it. Because I don't want nodeWindow.c to have to\n> re-evaluate the window function to determine of the run condition is\n> no longer met, I've coded the qual to reference the varno in the\n> window node's targetlist. That qual is no good for EXPLAIN so had to\n> include another set of quals that include the WindowFunc reference. I\n> saw that Index Only Scans have a similar means to make EXPLAIN work,\n> so I just followed that.\n>\n\n+1\n\nthis can be very nice feature\n\nPavel\n\n\n\n>\n> David\n>\n\nčt 1. 7. 
2021 v 11:11 odesílatel David Rowley <dgrowleyml@gmail.com> napsal:It seems like a few too many years of an SQL standard without any\nstandardised way to LIMIT the number of records in a result set caused\nvarious applications to adopt some strange ways to get this behaviour.\nOver here in the PostgreSQL world, we just type LIMIT n; at the end of\nour queries.  I believe Oracle people did a few tricks with a special\ncolumn named \"rownum\". Another set of people needed SQL that would\nwork over multiple DBMSes and used something like:\n\nSELECT * FROM (SELECT ... row_number() over (order by ...) rn) a WHERE rn <= 10;\n\nI believe it's fairly common to do paging this way on commerce sites.\n\nThe problem with PostgreSQL here is that neither the planner nor\nexecutor knows that once we get to row_number 11 that we may as well\nstop.  The number will never go back down in this partition.\n\nI'd like to make this better for PostgreSQL 15. I've attached a WIP\npatch to do so.\n\nHow this works is that I've added prosupport functions for each of\nrow_number(), rank() and dense_rank().  When doing qual pushdown, if\nwe happen to hit a windowing function, instead of rejecting the\npushdown, we see if there's a prosupport function and if there is, ask\nit if this qual can be used to allow us to stop emitting tuples from\nthe Window node by making use of this qual.  I've called these \"run\nconditions\".  Basically, keep running while this remains true. Stop\nwhen it's not.\n\nWe can't always use the qual directly. For example, if someone does.\n\nSELECT * FROM (SELECT ... row_number() over (order by ...) rn) a WHERE rn = 10;\n\nthen if we use the rn = 10 qual, we'd think we could stop right away.\nInstead, I've made the prosupport function handle this by generating a\nrn <= 10 qual so that we can stop once we get to 11.  In this case we\ncannot completely pushdown the qual. 
It needs to remain in place to\nfilter out rn values 1-9.\n\nRow_number(), rank() and dense_rank() are all monotonically increasing\nfunctions.  But we're not limited to just those.  COUNT(*) works too\nproviding the frame bounds guarantee that the function is either\nmonotonically increasing or decreasing.\n\nCOUNT(*) OVER (ORDER BY .. ROWS BETWEEN CURRENT ROW AND UNBOUNDED\nFOLLOWING) is monotonically decreasing, whereas the standard bound\noptions would make it monotonically increasing.\n\nThe same could be done for MIN() and MAX(). I just don't think that's\nworth doing. It seems unlikely that would get enough use.\n\nAnyway. I'd like to work on this more during the PG15 cycle.  I\nbelieve the attached patch makes this work ok. There are just a few\nthings to iron out.\n\n1) Unsure of the API to the prosupport function.  I wonder if the\nprosupport function should just be able to say if the function is\neither monotonically increasing or decreasing or neither then have\ncore code build a qual.  That would make the job of building new\nfunctions easier, but massively reduce the flexibility of the feature.\nI'm just not sure it needs to do more in the future.\n\n2) Unsure if what I've got to make EXPLAIN show the run condition is\nthe right way to do it. Because I don't want nodeWindow.c to have to\nre-evaluate the window function to determine of the run condition is\nno longer met, I've coded the qual to reference the varno in the\nwindow node's targetlist.  That qual is no good for EXPLAIN so had to\ninclude another set of quals that include the WindowFunc reference. 
I\nsaw that Index Only Scans have a similar means to make EXPLAIN work,\nso I just followed that.+1this can be very nice featurePavel \n\nDavid", "msg_date": "Thu, 1 Jul 2021 11:17:22 +0200", "msg_from": "Pavel Stehule <pavel.stehule@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Thu, 1 Jul 2021 at 21:11, David Rowley <dgrowleyml@gmail.com> wrote:\n> 1) Unsure of the API to the prosupport function. I wonder if the\n> prosupport function should just be able to say if the function is\n> either monotonically increasing or decreasing or neither then have\n> core code build a qual. That would make the job of building new\n> functions easier, but massively reduce the flexibility of the feature.\n> I'm just not sure it needs to do more in the future.\n\nI looked at this patch again today and ended up changing the API that\nI'd done for the prosupport functions. These just now set a new\n\"monotonic\" field in the (also newly renamed)\nSupportRequestWFuncMonotonic struct. This can be set to one of the\nvalues from the newly added MonotonicFunction enum, namely:\nMONOTONICFUNC_NONE, MONOTONICFUNC_INCREASING, MONOTONICFUNC_DECREASING\nor MONOTONICFUNC_BOTH.\n\nI also added handling for a few more cases that are perhaps rare but\ncould be done with just a few lines of code. For example; COUNT(*)\nOVER() is MONOTONICFUNC_BOTH as it can neither increase nor decrease\nfor a given window partition. I think technically all of the standard\nset of aggregate functions could have a prosupport function to handle\nthat case. Min() and Max() could go a little further, but I'm not sure\nif adding handling for that would be worth it, and if someone does\nthink that it is worth it, then I'd rather do that as a separate\npatch.\n\nI put the MonotonicFunction enum in plannodes.h. There's nothing\nspecific about window functions or support functions. 
It could, for\nexample, be reused again for something else such as monotonic\nset-returning functions.\n\nOne thing which I'm still not sure about is where\nfind_window_run_conditions() should be located. Currently, it's in\nallpaths.c but that does not really feel like the right place to me.\nWe do have planagg.c in src/backend/optimizer/plan, maybe we need\nplanwindow.c?\n\nDavid", "msg_date": "Mon, 16 Aug 2021 22:28:24 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Mon, Aug 16, 2021 at 3:28 AM David Rowley <dgrowleyml@gmail.com> wrote:\n\n> On Thu, 1 Jul 2021 at 21:11, David Rowley <dgrowleyml@gmail.com> wrote:\n> > 1) Unsure of the API to the prosupport function. I wonder if the\n> > prosupport function should just be able to say if the function is\n> > either monotonically increasing or decreasing or neither then have\n> > core code build a qual. That would make the job of building new\n> > functions easier, but massively reduce the flexibility of the feature.\n> > I'm just not sure it needs to do more in the future.\n>\n> I looked at this patch again today and ended up changing the API that\n> I'd done for the prosupport functions. These just now set a new\n> \"monotonic\" field in the (also newly renamed)\n> SupportRequestWFuncMonotonic struct. This can be set to one of the\n> values from the newly added MonotonicFunction enum, namely:\n> MONOTONICFUNC_NONE, MONOTONICFUNC_INCREASING, MONOTONICFUNC_DECREASING\n> or MONOTONICFUNC_BOTH.\n>\n> I also added handling for a few more cases that are perhaps rare but\n> could be done with just a few lines of code. For example; COUNT(*)\n> OVER() is MONOTONICFUNC_BOTH as it can neither increase nor decrease\n> for a given window partition. I think technically all of the standard\n> set of aggregate functions could have a prosupport function to handle\n> that case. 
Min() and Max() could go a little further, but I'm not sure\n> if adding handling for that would be worth it, and if someone does\n> think that it is worth it, then I'd rather do that as a separate\n> patch.\n>\n> I put the MonotonicFunction enum in plannodes.h. There's nothing\n> specific about window functions or support functions. It could, for\n> example, be reused again for something else such as monotonic\n> set-returning functions.\n>\n> One thing which I'm still not sure about is where\n> find_window_run_conditions() should be located. Currently, it's in\n> allpaths.c but that does not really feel like the right place to me.\n> We do have planagg.c in src/backend/optimizer/plan, maybe we need\n> planwindow.c?\n>\n> David\n>\nHi,\n\n+ if ((res->monotonic & MONOTONICFUNC_INCREASING) ==\nMONOTONICFUNC_INCREASING)\n\nThe above can be simplified as 'if (res->monotonic &\nMONOTONICFUNC_INCREASING) '\n\nCheers\n\nOn Mon, Aug 16, 2021 at 3:28 AM David Rowley <dgrowleyml@gmail.com> wrote:On Thu, 1 Jul 2021 at 21:11, David Rowley <dgrowleyml@gmail.com> wrote:\n> 1) Unsure of the API to the prosupport function.  I wonder if the\n> prosupport function should just be able to say if the function is\n> either monotonically increasing or decreasing or neither then have\n> core code build a qual.  That would make the job of building new\n> functions easier, but massively reduce the flexibility of the feature.\n> I'm just not sure it needs to do more in the future.\n\nI looked at this patch again today and ended up changing the API that\nI'd done for the prosupport functions.  These just now set a new\n\"monotonic\" field in the (also newly renamed)\nSupportRequestWFuncMonotonic struct. 
This can be set to one of the\nvalues from the newly added MonotonicFunction enum, namely:\nMONOTONICFUNC_NONE, MONOTONICFUNC_INCREASING, MONOTONICFUNC_DECREASING\nor MONOTONICFUNC_BOTH.\n\nI also added handling for a few more cases that are perhaps rare but\ncould be done with just a few lines of code. For example; COUNT(*)\nOVER() is MONOTONICFUNC_BOTH as it can neither increase nor decrease\nfor a given window partition. I think technically all of the standard\nset of aggregate functions could have a prosupport function to handle\nthat case. Min() and Max() could go a little further, but I'm not sure\nif adding handling for that would be worth it, and if someone does\nthink that it is worth it, then I'd rather do that as a separate\npatch.\n\nI put the MonotonicFunction enum in plannodes.h. There's nothing\nspecific about window functions or support functions. It could, for\nexample, be reused again for something else such as monotonic\nset-returning functions.\n\nOne thing which I'm still not sure about is where\nfind_window_run_conditions() should be located. Currently, it's in\nallpaths.c but that does not really feel like the right place to me.\nWe do have planagg.c in src/backend/optimizer/plan, maybe we need\nplanwindow.c?\n\nDavidHi,+               if ((res->monotonic & MONOTONICFUNC_INCREASING) == MONOTONICFUNC_INCREASING)The above can be simplified as 'if (res->monotonic & MONOTONICFUNC_INCREASING) 'Cheers", "msg_date": "Mon, 16 Aug 2021 08:57:12 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Tue, 17 Aug 2021 at 03:51, Zhihong Yu <zyu@yugabyte.com> wrote:\n> + if ((res->monotonic & MONOTONICFUNC_INCREASING) == MONOTONICFUNC_INCREASING)\n>\n> The above can be simplified as 'if (res->monotonic & MONOTONICFUNC_INCREASING) '\n\nTrue. 
I've attached an updated patch.\n\nDavid", "msg_date": "Wed, 18 Aug 2021 22:39:36 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "Hi David:\n\nThanks for the patch.\n\nOn Wed, Aug 18, 2021 at 6:40 PM David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Tue, 17 Aug 2021 at 03:51, Zhihong Yu <zyu@yugabyte.com> wrote:\n> > + if ((res->monotonic & MONOTONICFUNC_INCREASING) == MONOTONICFUNC_INCREASING)\n> >\n> > The above can be simplified as 'if (res->monotonic & MONOTONICFUNC_INCREASING) '\n>\n> True. I've attached an updated patch.\n>\n> David\n\nLooks like we need to narrow down the situation where we can apply\nthis optimization.\n\nSELECT * FROM\n (SELECT empno,\n salary,\n count(*) over (order by empno desc) as c ,\n dense_rank() OVER (ORDER BY salary DESC) dr\n\n FROM empsalary) emp\nWHERE dr = 1;\n\nIn the current master, the result is:\n\n empno | salary | c | dr\n\n-------+--------+---+----\n\n 8 | 6000 | 4 | 1\n\n(1 row)\n\nIn the patched version, the result is:\n\n empno | salary | c | dr\n\n-------+--------+---+----\n\n 8 | 6000 | 1 | 1\n\n(1 row)\n\n-- \nBest Regards\nAndy Fan (https://www.aliyun.com/)\n\n\n", "msg_date": "Wed, 18 Aug 2021 20:20:45 +0800", "msg_from": "Andy Fan <zhihui.fan1213@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Thu, 19 Aug 2021 at 00:20, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n> In the current master, the result is:\n>\n> empno | salary | c | dr\n> -------+--------+---+----\n> 8 | 6000 | 4 | 1\n\n> In the patched version, the result is:\n>\n> empno | salary | c | dr\n> -------+--------+---+----\n> 8 | 6000 | 1 | 1\n\nThanks for taking it for a spin.\n\nThat's a bit unfortunate. I don't immediately see how to fix it other\nthan to restrict the optimisation to only apply when there's a single\nWindowClause. 
It might be possible to relax it further and only apply\nif it's the final window clause to be evaluated, but in those cases,\nthe savings are likely to be much less anyway as some previous\nWindowAgg will have exhausted all rows from its subplan. Likely\nrestricting it to only working if there's 1 WindowClause would be fine\nas for the people using row_number() for a top-N type query, there's\nmost likely only going to be 1 WindowClause.\n\nAnyway, I'll take a few more days to think about it before posting a fix.\n\nDavid\n\n\n", "msg_date": "Thu, 19 Aug 2021 18:35:27 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Thu, Aug 19, 2021 at 2:35 PM David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Thu, 19 Aug 2021 at 00:20, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n> > In the current master, the result is:\n> >\n> > empno | salary | c | dr\n> > -------+--------+---+----\n> > 8 | 6000 | 4 | 1\n>\n> > In the patched version, the result is:\n> >\n> > empno | salary | c | dr\n> > -------+--------+---+----\n> > 8 | 6000 | 1 | 1\n>\n> Thanks for taking it for a spin.\n>\n> That's a bit unfortunate. I don't immediately see how to fix it other\n> than to restrict the optimisation to only apply when there's a single\n> WindowClause. It might be possible to relax it further and only apply\n> if it's the final window clause to be evaluated, but in those cases,\n> the savings are likely to be much less anyway as some previous\n> WindowAgg will have exhausted all rows from its subplan.\n\nI am trying to hack the select_active_windows function to make\nsure the WindowClause with Run Condition clause to be the last one\nto evaluate (we also need to consider more than 1 window func has\nrun condition), at that time, the run condition clause is ready already.\n\nHowever there are two troubles in this direction: a). 
This may conflict\nwith \"the windows that need the same sorting are adjacent in the list.\"\nb). \"when two or more windows are order-equivalent then all peer rows\nmust be presented in the same order in all of them. .. (See General Rule 4 of\n<window clause> in SQL2008 - SQL2016.)\"\n\nIn summary, I am not sure if it is correct to change the execution Order\nof WindowAgg freely.\n\n> Likely\n> restricting it to only working if there's 1 WindowClause would be fine\n> as for the people using row_number() for a top-N type query, there's\n> most likely only going to be 1 WindowClause.\n>\n\nThis sounds practical. And I suggest the following small changes.\n(just check the partitionClause before the prosupport)\n\n@@ -2133,20 +2133,22 @@ find_window_run_conditions(Query *subquery,\nRangeTblEntry *rte, Index rti,\n\n *keep_original = true;\n\n- prosupport = get_func_support(wfunc->winfnoid);\n-\n- /* Check if there's a support function for 'wfunc' */\n- if (!OidIsValid(prosupport))\n- return false;\n-\n /*\n * Currently the WindowAgg node just stop when the run condition is no\n * longer true. 
If there is a PARTITION BY clause then we cannot just\n * stop as other partitions still need to be processed.\n */\n+\n+ /* Check this first since window function with a partition\nclause is common*/\n if (wclause->partitionClause != NIL)\n return false;\n\n+ prosupport = get_func_support(wfunc->winfnoid);\n+\n+ /* Check if there's a support function for 'wfunc' */\n+ if (!OidIsValid(prosupport))\n+ return false;\n+\n /* get the Expr from the other side of the OpExpr */\n if (wfunc_left)\n otherexpr = lsecond(opexpr->args);\n\n\n\n-- \nBest Regards\nAndy Fan (https://www.aliyun.com/)\n\n\n", "msg_date": "Thu, 26 Aug 2021 10:54:05 +0800", "msg_from": "Andy Fan <zhihui.fan1213@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "This looks like an awesome addition.\n\nI have one technical questions...\n\nIs it possible to actually transform the row_number case into a LIMIT\nclause or make the planner support for this case equivalent to it (in\nwhich case we can replace the LIMIT clause planning to transform into\na window function)?\n\nThe reason I ask is because the Limit plan node is actually quite a\nbit more optimized than the general window function plan node. It\ncalculates cost estimates based on the limit and can support Top-N\nsort nodes.\n\nBut the bigger question is whether this patch is ready for a committer\nto look at? Were you able to resolve Andy Fan's bug report? 
Did you\nresolve the two questions in the original email?\n\n\n", "msg_date": "Tue, 15 Mar 2022 17:23:40 -0400", "msg_from": "Greg Stark <stark@mit.edu>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Tue, Mar 15, 2022 at 5:24 PM Greg Stark <stark@mit.edu> wrote:\n\n> This looks like an awesome addition.\n>\n> I have one technical questions...\n>\n> Is it possible to actually transform the row_number case into a LIMIT\n> clause or make the planner support for this case equivalent to it (in\n> which case we can replace the LIMIT clause planning to transform into\n> a window function)?\n>\n> The reason I ask is because the Limit plan node is actually quite a\n> bit more optimized than the general window function plan node. It\n> calculates cost estimates based on the limit and can support Top-N\n> sort nodes.\n>\n> But the bigger question is whether this patch is ready for a committer\n> to look at? Were you able to resolve Andy Fan's bug report? Did you\n> resolve the two questions in the original email?\n>\n\n+1 to all this\n\nIt seems like this effort would aid in implementing what some other\ndatabases implement via the QUALIFY clause, which is to window functions\nwhat HAVING is to aggregate functions.\nexample:\nhttps://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#qualify_clause\n\nOn Tue, Mar 15, 2022 at 5:24 PM Greg Stark <stark@mit.edu> wrote:This looks like an awesome addition.\n\nI have one technical questions...\n\nIs it possible to actually transform the row_number case into a LIMIT\nclause or make the planner support for this case equivalent to it (in\nwhich case we can replace the LIMIT clause planning to transform into\na window function)?\n\nThe reason I ask is because the Limit plan node is actually quite a\nbit more optimized than the general window function plan node. 
It\ncalculates cost estimates based on the limit and can support Top-N\nsort nodes.\n\nBut the bigger question is whether this patch is ready for a committer\nto look at? Were you able to resolve Andy Fan's bug report? Did you\nresolve the two questions in the original email?+1 to all thisIt seems like this effort would aid in implementing what some other databases implement via the QUALIFY clause, which is to window functions what HAVING is to aggregate functions.example: https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#qualify_clause", "msg_date": "Thu, 17 Mar 2022 00:04:03 -0400", "msg_from": "Corey Huinker <corey.huinker@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "Hi,\n\nOn 2021-08-19 18:35:27 +1200, David Rowley wrote:\n> Anyway, I'll take a few more days to think about it before posting a fix.\n\nThe patch in the CF entry doesn't apply: http://cfbot.cputube.org/patch_37_3234.log\n\nThe quoted message was ~6 months ago. I think this CF entry should be marked\nas returned-with-feedback?\n\n- Andres\n\n\n", "msg_date": "Mon, 21 Mar 2022 18:07:18 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Thu, 26 Aug 2021 at 14:54, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n>\n> On Thu, Aug 19, 2021 at 2:35 PM David Rowley <dgrowleyml@gmail.com> wrote:\n> >\n> > On Thu, 19 Aug 2021 at 00:20, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n> > > In the current master, the result is:\n> > >\n> > > empno | salary | c | dr\n> > > -------+--------+---+----\n> > > 8 | 6000 | 4 | 1\n> >\n> > > In the patched version, the result is:\n> > >\n> > > empno | salary | c | dr\n> > > -------+--------+---+----\n> > > 8 | 6000 | 1 | 1\n> >\n> > Thanks for taking it for a spin.\n> >\n> > That's a bit unfortunate. 
I don't immediately see how to fix it other\n> > than to restrict the optimisation to only apply when there's a single\n> > WindowClause. It might be possible to relax it further and only apply\n> > if it's the final window clause to be evaluated, but in those cases,\n> > the savings are likely to be much less anyway as some previous\n> > WindowAgg will have exhausted all rows from its subplan.\n>\n> I am trying to hack the select_active_windows function to make\n> sure the WindowClause with Run Condition clause to be the last one\n> to evaluate (we also need to consider more than 1 window func has\n> run condition), at that time, the run condition clause is ready already.\n>\n> However there are two troubles in this direction: a). This may conflict\n> with \"the windows that need the same sorting are adjacent in the list.\"\n> b). \"when two or more windows are order-equivalent then all peer rows\n> must be presented in the same order in all of them. .. (See General Rule 4 of\n> <window clause> in SQL2008 - SQL2016.)\"\n>\n> In summary, I am not sure if it is correct to change the execution Order\n> of WindowAgg freely.\n\nThanks for looking at that.\n\nMy current thoughts are that it just feels a little too risky to\nadjust the comparison function that sorts the window clauses to pay\nattention to the run-condition.\n\nWe would need to ensure that there's just a single window function\nwith a run condition as it wouldn't be valid for there to be multiple.\nIt would be easy enough to ensure we only push quals into just 1\nwindow clause, but that and meddling with the evaluation order has\ntrade-offs. To do that properly, we'd likely want to consider the\ncosts when deciding which window clause would benefit from having\nquals pushed the most. 
Plus, if we start messing with the evaluation\norder then we'd likely really want some sort of costing to check if\npushing a qual and adjusting the evaluation order is actually cheaper\nthan not pushing the qual and keeping the clauses in the order that\nrequires the minimum number of sorts. The planner is not really\ngeared up for costing things like that properly, that's why we just\nassume the order with the least sorts is best. In reality that's often\nnot going to be true as an index may exist and we might want to\nevaluate a clause first if we could get rid of a sort and index scan\ninstead. Sorting the window clauses based on their SortGroupClause is\njust the best we can do for now at that stage in planning.\n\nI think it's safer to just disable the optimisation when there are\nmultiple window clauses. Multiple matching clauses are merged\nalready, so it's perfectly valid to have multiple window functions,\nit's just they must share the same window clause. I don't think\nthat's terrible as with the major use case that I have in mind for\nthis, the window function is only added to limit the number of rows.\nIn most cases I can imagine, there'd be no reason to have an\nadditional window function with different frame options.\n\nI've attached an updated patch.", "msg_date": "Wed, 23 Mar 2022 11:24:13 +1300", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Wed, 16 Mar 2022 at 10:24, Greg Stark <stark@mit.edu> wrote:\n>\n> This looks like an awesome addition.\n\nThanks\n\n> I have one technical questions...\n>\n> Is it possible to actually transform the row_number case into a LIMIT\n> clause or make the planner support for this case equivalent to it (in\n> which case we can replace the LIMIT clause planning to transform into\n> a window function)?\n\nCurrently, I have only coded it to support monotonically increasing\nand decreasing functions. 
Putting a <= <const> type condition on a\nrow_number() function with no PARTITION BY clause I think is logically\nthe same as a LIMIT clause, but that's not the case for rank() and\ndense_rank(). There may be multiple peer rows with the same rank in\nthose cases. We'd have no way to know what the LIMIT should be set to.\nI don't really want to just do this for row_number().\n\n> The reason I ask is because the Limit plan node is actually quite a\n> bit more optimized than the general window function plan node. It\n> calculates cost estimates based on the limit and can support Top-N\n> sort nodes.\n\nThis is true. There's perhaps no reason why an additional property\ncould not be added to allow the prosupport function to optionally set\n*exactly* the maximum number of rows that could match the condition.\ne.g. for select * from (select *,row_number() over (order by c) rn\nfrom ..) w where rn <= 10; that could be set to 10, and if we used\nrank() instead of row_number(), it could just be left unset.\n\nI think this is probably worth thinking about at some future date. I\ndon't really want to make it part of this effort. I also don't think\nI'm doing anything here that would need to be undone to make that\nwork.\n\nDavid\n\n\n", "msg_date": "Wed, 23 Mar 2022 11:35:53 +1300", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Thu, 17 Mar 2022 at 17:04, Corey Huinker <corey.huinker@gmail.com> wrote:\n> It seems like this effort would aid in implementing what some other databases implement via the QUALIFY clause, which is to window functions what HAVING is to aggregate functions.\n> example: https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#qualify_clause\n\nIsn't that just syntactic sugar? 
You could get the same from adding a\nsubquery where a WHERE clause to filter rows evaluated after the\nwindow clause.\n\nDavid\n\n\n", "msg_date": "Wed, 23 Mar 2022 11:39:13 +1300", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Tue, Mar 22, 2022 at 3:24 PM David Rowley <dgrowleyml@gmail.com> wrote:\n\n> On Thu, 26 Aug 2021 at 14:54, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n> >\n> > On Thu, Aug 19, 2021 at 2:35 PM David Rowley <dgrowleyml@gmail.com>\n> wrote:\n> > >\n> > > On Thu, 19 Aug 2021 at 00:20, Andy Fan <zhihui.fan1213@gmail.com>\n> wrote:\n> > > > In the current master, the result is:\n> > > >\n> > > > empno | salary | c | dr\n> > > > -------+--------+---+----\n> > > > 8 | 6000 | 4 | 1\n> > >\n> > > > In the patched version, the result is:\n> > > >\n> > > > empno | salary | c | dr\n> > > > -------+--------+---+----\n> > > > 8 | 6000 | 1 | 1\n> > >\n> > > Thanks for taking it for a spin.\n> > >\n> > > That's a bit unfortunate. I don't immediately see how to fix it other\n> > > than to restrict the optimisation to only apply when there's a single\n> > > WindowClause. It might be possible to relax it further and only apply\n> > > if it's the final window clause to be evaluated, but in those cases,\n> > > the savings are likely to be much less anyway as some previous\n> > > WindowAgg will have exhausted all rows from its subplan.\n> >\n> > I am trying to hack the select_active_windows function to make\n> > sure the WindowClause with Run Condition clause to be the last one\n> > to evaluate (we also need to consider more than 1 window func has\n> > run condition), at that time, the run condition clause is ready already.\n> >\n> > However there are two troubles in this direction: a). This may conflict\n> > with \"the windows that need the same sorting are adjacent in the list.\"\n> > b). 
\"when two or more windows are order-equivalent then all peer rows\n> > must be presented in the same order in all of them. .. (See General Rule\n> 4 of\n> > <window clause> in SQL2008 - SQL2016.)\"\n> >\n> > In summary, I am not sure if it is correct to change the execution Order\n> > of WindowAgg freely.\n>\n> Thanks for looking at that.\n>\n> My current thoughts are that it just feels a little too risky to\n> adjust the comparison function that sorts the window clauses to pay\n> attention to the run-condition.\n>\n> We would need to ensure that there's just a single window function\n> with a run condition as it wouldn't be valid for there to be multiple.\n> It would be easy enough to ensure we only push quals into just 1\n> window clause, but that and meddling with the evaluation order has\n> trade-offs. To do that properly, we'd likely want to consider the\n> costs when deciding which window clause would benefit from having\n> quals pushed the most. Plus, if we start messing with the evaluation\n> order then we'd likely really want some sort of costing to check if\n> pushing a qual and adjusting the evaluation order is actually cheaper\n> than not pushing the qual and keeping the clauses in the order that\n> requires the minimum number of sorts. The planner is not really\n> geared up for costing things like that properly, that's why we just\n> assume the order with the least sorts is best. In reality that's often\n> not going to be true as an index may exist and we might want to\n> evaluate a clause first if we could get rid of a sort and index scan\n> instead. Sorting the window clauses based on their SortGroupClause is\n> just the best we can do for now at that stage in planning.\n>\n> I think it's safer to just disable the optimisation when there are\n> multiple window clauses. Multiple matching clauses are merged\n> already, so it's perfectly valid to have multiple window functions,\n> it's just they must share the same window clause. 
I don't think\n> that's terrible as with the major use case that I have in mind for\n> this, the window function is only added to limit the number of rows.\n> In most cases I can imagine, there'd be no reason to have an\n> additional window function with different frame options.\n>\n> I've attached an updated patch.\n>\nHi,\nThe following code seems to be common between if / else blocks (w.r.t.\nwfunc_left) of find_window_run_conditions().\n\n+ op = get_opfamily_member(opinfo->opfamily_id,\n+ opinfo->oplefttype,\n+ opinfo->oprighttype,\n+ newstrategy);\n+\n+ newopexpr = (OpExpr *) make_opclause(op,\n+ opexpr->opresulttype,\n+ opexpr->opretset,\n+ otherexpr,\n+ (Expr *) wfunc,\n+ opexpr->opcollid,\n+ opexpr->inputcollid);\n+ newopexpr->opfuncid = get_opcode(op);\n+\n+ *keep_original = true;\n+ runopexpr = newopexpr;\n\nIt would be nice if this code can be shared.\n\n+ WindowClause *wclause = (WindowClause *)\n+ list_nth(subquery->windowClause,\n+ wfunc->winref - 1);\n\nThe code would be more readable if list_nth() is indented.\n\n+ /* Check the left side of the OpExpr */\n\nIt seems the code for checking left / right is the same. It would be better\nto extract and reuse the code.\n\nCheers\n\nOn Tue, Mar 22, 2022 at 3:24 PM David Rowley <dgrowleyml@gmail.com> wrote:On Thu, 26 Aug 2021 at 14:54, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n>\n> On Thu, Aug 19, 2021 at 2:35 PM David Rowley <dgrowleyml@gmail.com> wrote:\n> >\n> > On Thu, 19 Aug 2021 at 00:20, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n> > > In the current master, the result is:\n> > >\n> > >  empno | salary | c | dr\n> > > -------+--------+---+----\n> > >      8 |   6000 | 4 |  1\n> >\n> > > In the patched version, the result is:\n> > >\n> > >  empno | salary | c | dr\n> > > -------+--------+---+----\n> > >      8 |   6000 | 1 |  1\n> >\n> > Thanks for taking it for a spin.\n> >\n> > That's a bit unfortunate.  
I don't immediately see how to fix it other\n> > than to restrict the optimisation to only apply when there's a single\n> > WindowClause. It might be possible to relax it further and only apply\n> > if it's the final window clause to be evaluated, but in those cases,\n> > the savings are likely to be much less anyway as some previous\n> > WindowAgg will have exhausted all rows from its subplan.\n>\n> I am trying to hack the select_active_windows function to make\n> sure the WindowClause with Run Condition clause to be the last one\n> to evaluate (we also need to consider more than 1 window func has\n> run condition), at that time, the run condition clause is ready already.\n>\n> However there are two troubles in this direction: a).  This may conflict\n> with \"the windows that need the same sorting are adjacent in the list.\"\n> b). \"when two or more windows are order-equivalent then all peer rows\n> must be presented in the same order in all of them. .. (See General Rule 4 of\n> <window clause> in SQL2008 - SQL2016.)\"\n>\n> In summary, I am not sure if it is correct to change the execution Order\n> of WindowAgg freely.\n\nThanks for looking at that.\n\nMy current thoughts are that it just feels a little too risky to\nadjust the comparison function that sorts the window clauses to pay\nattention to the run-condition.\n\nWe would need to ensure that there's just a single window function\nwith a run condition as it wouldn't be valid for there to be multiple.\nIt would be easy enough to ensure we only push quals into just 1\nwindow clause, but that and meddling with the evaluation order has\ntrade-offs.  To do that properly, we'd likely want to consider the\ncosts when deciding which window clause would benefit from having\nquals pushed the most.  
Plus, if we start messing with the evaluation\norder then we'd likely really want some sort of costing to check if\npushing a qual and adjusting the evaluation order is actually cheaper\nthan not pushing the qual and keeping the clauses in the order that\nrequires the minimum number of sorts.   The planner is not really\ngeared up for costing things like that properly, that's why we just\nassume the order with the least sorts is best. In reality that's often\nnot going to be true as an index may exist and we might want to\nevaluate a clause first if we could get rid of a sort and index scan\ninstead. Sorting the window clauses based on their SortGroupClause is\njust the best we can do for now at that stage in planning.\n\nI think it's safer to just disable the optimisation when there are\nmultiple window clauses.  Multiple matching clauses are merged\nalready, so it's perfectly valid to have multiple window functions,\nit's just they must share the same window clause.  I don't think\nthat's terrible as with the major use case that I have in mind for\nthis, the window function is only added to limit the number of rows.\nIn most cases I can imagine, there'd be no reason to have an\nadditional window function with different frame options.\n\nI've attached an updated patch.Hi,The following code seems to be common between if / else blocks (w.r.t. 
wfunc_left) of find_window_run_conditions().+               op = get_opfamily_member(opinfo->opfamily_id,+                                        opinfo->oplefttype,+                                        opinfo->oprighttype,+                                        newstrategy);++               newopexpr = (OpExpr *) make_opclause(op,+                                                    opexpr->opresulttype,+                                                    opexpr->opretset,+                                                    otherexpr,+                                                    (Expr *) wfunc,+                                                    opexpr->opcollid,+                                                    opexpr->inputcollid);+               newopexpr->opfuncid = get_opcode(op);++               *keep_original = true;+               runopexpr = newopexpr; It would be nice if this code can be shared.+           WindowClause *wclause = (WindowClause *)+           list_nth(subquery->windowClause,+                    wfunc->winref - 1);The code would be more readable if list_nth() is indented.+   /* Check the left side of the OpExpr */It seems the code for checking left / right is the same. It would be better to extract and reuse the code.Cheers", "msg_date": "Tue, 22 Mar 2022 16:54:08 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Wed, 23 Mar 2022 at 12:50, Zhihong Yu <zyu@yugabyte.com> wrote:\n> The following code seems to be common between if / else blocks (w.r.t. wfunc_left) of find_window_run_conditions().\n\n> It would be nice if this code can be shared.\n\nI remember thinking about that and thinking that I didn't want to\novercomplicate the if conditions for the strategy tests. 
I'd thought\nthese would have become:\n\nif ((wfunc_left && (strategy == BTLessStrategyNumber ||\n strategy == BTLessEqualStrategyNumber)) ||\n (!wfunc_left && (strategy == BTGreaterStrategyNumber ||\n strategy == BTGreaterEqualStrategyNumber)))\n\nwhich I didn't think was very readable. That caused me to keep it separate.\n\nOn reflection, we can just leave the strategy checks as they are, then\nadd the additional code for checking wfunc_left when checking the\nres->monotonic, i.e:\n\nif ((wfunc_left && (res->monotonic & MONOTONICFUNC_INCREASING)) ||\n (!wfunc_left && (res->monotonic & MONOTONICFUNC_DECREASING)))\n\nI think that's more readable than doubling up the strategy checks, so\nI've done it that way in the attached.\n\n>\n> + WindowClause *wclause = (WindowClause *)\n> + list_nth(subquery->windowClause,\n> + wfunc->winref - 1);\n>\n> The code would be more readable if list_nth() is indented.\n\nThat's just the way pgindent put it.\n\n> + /* Check the left side of the OpExpr */\n>\n> It seems the code for checking left / right is the same. It would be better to extract and reuse the code.\n\nI've moved some of that code into find_window_run_conditions() which\nremoves about 10 lines of code.\n\nUpdated patch attached. Thanks for looking.\n\nDavid", "msg_date": "Wed, 23 Mar 2022 16:23:02 +1300", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Wed, 23 Mar 2022 at 11:24, David Rowley <dgrowleyml@gmail.com> wrote:\n> I think it's safer to just disable the optimisation when there are\n> multiple window clauses. Multiple matching clauses are merged\n> already, so it's perfectly valid to have multiple window functions,\n> it's just they must share the same window clause. 
I don't think\n> that's terrible as with the major use case that I have in mind for\n> this, the window function is only added to limit the number of rows.\n> In most cases I can imagine, there'd be no reason to have an\n> additional window function with different frame options.\n\nI've not looked into the feasibility of it, but I had a thought that\nwe could just accumulate all the run-conditions in a new field in the\nPlannerInfo then just tag them onto the top-level WindowAgg when\nbuilding the plan.\n\nI'm just not sure it would be any more useful than what the v3 patch\nis currently doing as intermediate WindowAggs would still need to\nprocess all rows. I think it would only save the window function\nevaluation of the top-level WindowAgg for rows that don't match the\nrun-condition. All the supported window functions are quite cheap, so\nit's not a huge saving. I'd bet there would be example cases where it\nwould be measurable though.\n\nDavid\n\n\n", "msg_date": "Wed, 23 Mar 2022 16:30:27 +1300", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Tue, Mar 22, 2022 at 3:39 PM David Rowley <dgrowleyml@gmail.com> wrote:\n\n> On Thu, 17 Mar 2022 at 17:04, Corey Huinker <corey.huinker@gmail.com>\n> wrote:\n> > It seems like this effort would aid in implementing what some other\n> databases implement via the QUALIFY clause, which is to window functions\n> what HAVING is to aggregate functions.\n> > example:\n> https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#qualify_clause\n>\n> Isn't that just syntactic sugar? You could get the same from adding a\n> subquery where a WHERE clause to filter rows evaluated after the\n> window clause.\n>\n>\nI'd like some of that syntactic sugar please. 
It goes nicely with my\nHAVING syntactic coffee.\n\nDavid J.\n\nOn Tue, Mar 22, 2022 at 3:39 PM David Rowley <dgrowleyml@gmail.com> wrote:On Thu, 17 Mar 2022 at 17:04, Corey Huinker <corey.huinker@gmail.com> wrote:\n> It seems like this effort would aid in implementing what some other databases implement via the QUALIFY clause, which is to window functions what HAVING is to aggregate functions.\n> example: https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#qualify_clause\n\nIsn't that just syntactic sugar?  You could get the same from adding a\nsubquery where a WHERE clause to filter rows evaluated after the\nwindow clause.I'd like some of that syntactic sugar please.  It goes nicely with my HAVING syntactic coffee.David J.", "msg_date": "Tue, 22 Mar 2022 22:09:27 -0700", "msg_from": "\"David G. Johnston\" <david.g.johnston@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Wed, 23 Mar 2022 at 16:30, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Wed, 23 Mar 2022 at 11:24, David Rowley <dgrowleyml@gmail.com> wrote:\n> > I think it's safer to just disable the optimisation when there are\n> > multiple window clauses. Multiple matching clauses are merged\n> > already, so it's perfectly valid to have multiple window functions,\n> > it's just they must share the same window clause. 
I don't think\n> > that's terrible as with the major use case that I have in mind for\n> > this, the window function is only added to limit the number of rows.\n> > In most cases I can imagine, there'd be no reason to have an\n> > additional window function with different frame options.\n>\n> I've not looked into the feasibility of it, but I had a thought that\n> we could just accumulate all the run-conditions in a new field in the\n> PlannerInfo then just tag them onto the top-level WindowAgg when\n> building the plan.\n>\n> I'm just not sure it would be any more useful than what the v3 patch\n> is currently doing as intermediate WindowAggs would still need to\n> process all rows. I think it would only save the window function\n> evaluation of the top-level WindowAgg for rows that don't match the\n> run-condition. All the supported window functions are quite cheap, so\n> it's not a huge saving. I'd bet there would be example cases where it\n> would be measurable though.\n\nAnother way of doing this that seems better is to make it so only the\ntop-level WindowAgg will stop processing when the run condition\nbecomes false. Any intermediate WindowAggs must continue processing\ntuples, but may skip evaluation of their WindowFuncs.\n\nDoing things this way also allows us to handle cases where there is a\nPARTITION BY clause, however, in this case, the top-level WindowAgg\nmust not stop processing and return NULL, instead, it can just act as\nif it were an intermediate WindowAgg and just stop evaluating\nWindowFuncs. The top-level WindowAgg must continue processing the\ntuples so that the other partitions are also processed.\n\nI made the v4 patch do things this way and tested the performance of\nit vs current master. Test 1 and 2 have PARTITION BY clauses. 
There's\na small performance increase from not evaluating the row_number()\nfunction once rn <= 2 is no longer true.\n\nTest 3 shows the same speedup as the original patch where we just stop\nprocessing any further tuples when the run condition is no longer true\nand there is no PARTITION BY clause.\n\nSetup:\ncreate table xy (x int, y int);\ninsert into xy select x,y from generate_series(1,1000)x,\ngenerate_Series(1,1000)y;\ncreate index on xy(x,y);\nvacuum analyze xy;\n\nTest 1:\n\nexplain analyze select * from (select x,y,row_number() over (partition\nby x order by y) rn from xy) as xy where rn <= 2;\n\nMaster:\n\nExecution Time: 359.553 ms\nExecution Time: 354.235 ms\nExecution Time: 357.646 ms\n\nv4 patch:\n\nExecution Time: 346.641 ms\nExecution Time: 337.131 ms\nExecution Time: 336.531 ms\n\n(5% faster)\n\nTest 2:\n\nexplain analyze select * from (select x,y,row_number() over (partition\nby x order by y) rn from xy) as xy where rn = 1;\n\nMaster:\n\nExecution Time: 359.046 ms\nExecution Time: 357.601 ms\nExecution Time: 357.977 ms\n\nv4 patch:\n\nExecution Time: 336.540 ms\nExecution Time: 337.024 ms\nExecution Time: 342.706 ms\n\n(5.7% faster)\n\nTest 3:\n\nexplain analyze select * from (select x,y,row_number() over (order by\nx,y) rn from xy) as xy where rn <= 2;\n\nMaster:\n\nExecution Time: 362.322 ms\nExecution Time: 348.812 ms\nExecution Time: 349.471 ms\n\nv4 patch:\n\nExecution Time: 0.060 ms\nExecution Time: 0.037 ms\nExecution Time: 0.037 ms\n\n(~8000x faster)\n\nOne thing which I'm not sure about with the patch is how I'm handling\nthe evaluation of the runcondition in nodeWindowAgg.c. Instead of\nhaving ExecQual() evaluate an OpExpr such as \"row_number() over (...)\n<= 10\", I'm replacing the WindowFunc with the Var in the targetlist\nthat corresponds to the given WindowFunc. This saves having to double\nevaluate the WindowFunc. Instead, the value of the Var can be taken\ndirectly from the slot. 
I don't know of anywhere else we do things\nquite like that. The runcondition is slightly similar to HAVING\nclauses, but HAVING clauses don't work this way. Maybe they would\nhave if slots had existed back then. Or maybe it's a bad idea to set a\nprecedent that the targetlist Vars must be evaluated already. Does\nanyone have any thoughts on this part?\n\nv4 patch attached.\n\nDavid", "msg_date": "Tue, 29 Mar 2022 15:11:52 +1300", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "Hi,\n\nOn 2022-03-29 15:11:52 +1300, David Rowley wrote:\n> One thing which I'm not sure about with the patch is how I'm handling\n> the evaluation of the runcondition in nodeWindowAgg.c. Instead of\n> having ExecQual() evaluate an OpExpr such as \"row_number() over (...)\n> <= 10\", I'm replacing the WindowFunc with the Var in the targetlist\n> that corresponds to the given WindowFunc. This saves having to double\n> evaluate the WindowFunc. Instead, the value of the Var can be taken\n> directly from the slot. I don't know of anywhere else we do things\n> quite like that. The runcondition is slightly similar to HAVING\n> clauses, but HAVING clauses don't work this way.\n\nDon't HAVING clauses actually work pretty similar? Yes, they don't have a Var,\nbut for expression evaluation purposes an Aggref is nearly the same as a plain\nVar:\n\n EEO_CASE(EEOP_INNER_VAR)\n {\n int attnum = op->d.var.attnum;\n\n /*\n * Since we already extracted all referenced columns from the\n * tuple with a FETCHSOME step, we can just grab the value\n * directly out of the slot's decomposed-data arrays. 
But let's\n * have an Assert to check that that did happen.\n */\n Assert(attnum >= 0 && attnum < innerslot->tts_nvalid);\n *op->resvalue = innerslot->tts_values[attnum];\n *op->resnull = innerslot->tts_isnull[attnum];\n\n EEO_NEXT();\n }\nvs\n EEO_CASE(EEOP_AGGREF)\n {\n /*\n * Returns a Datum whose value is the precomputed aggregate value\n * found in the given expression context.\n */\n int aggno = op->d.aggref.aggno;\n\n Assert(econtext->ecxt_aggvalues != NULL);\n\n *op->resvalue = econtext->ecxt_aggvalues[aggno];\n *op->resnull = econtext->ecxt_aggnulls[aggno];\n\n EEO_NEXT();\n }\n\nspecifically we don't re-evaluate expressions?\n\nThis is afaics slightly cheaper than referencing a variable in a slot.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Tue, 29 Mar 2022 15:16:15 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "Thanks for having a look at this.\n\nOn Wed, 30 Mar 2022 at 11:16, Andres Freund <andres@anarazel.de> wrote:\n> On 2022-03-29 15:11:52 +1300, David Rowley wrote:\n> > One thing which I'm not sure about with the patch is how I'm handling\n> > the evaluation of the runcondition in nodeWindowAgg.c. Instead of\n> > having ExecQual() evaluate an OpExpr such as \"row_number() over (...)\n> > <= 10\", I'm replacing the WindowFunc with the Var in the targetlist\n> > that corresponds to the given WindowFunc. This saves having to double\n> > evaluate the WindowFunc. Instead, the value of the Var can be taken\n> > directly from the slot. I don't know of anywhere else we do things\n> > quite like that. The runcondition is slightly similar to HAVING\n> > clauses, but HAVING clauses don't work this way.\n>\n> Don't HAVING clauses actually work pretty similar? 
Yes, they don't have a Var,\n> but for expression evaluation purposes an Aggref is nearly the same as a plain\n> Var:\n>\n> EEO_CASE(EEOP_INNER_VAR)\n> {\n> int attnum = op->d.var.attnum;\n>\n> /*\n> * Since we already extracted all referenced columns from the\n> * tuple with a FETCHSOME step, we can just grab the value\n> * directly out of the slot's decomposed-data arrays. But let's\n> * have an Assert to check that that did happen.\n> */\n> Assert(attnum >= 0 && attnum < innerslot->tts_nvalid);\n> *op->resvalue = innerslot->tts_values[attnum];\n> *op->resnull = innerslot->tts_isnull[attnum];\n>\n> EEO_NEXT();\n> }\n> vs\n> EEO_CASE(EEOP_AGGREF)\n> {\n> /*\n> * Returns a Datum whose value is the precomputed aggregate value\n> * found in the given expression context.\n> */\n> int aggno = op->d.aggref.aggno;\n>\n> Assert(econtext->ecxt_aggvalues != NULL);\n>\n> *op->resvalue = econtext->ecxt_aggvalues[aggno];\n> *op->resnull = econtext->ecxt_aggnulls[aggno];\n>\n> EEO_NEXT();\n> }\n>\n> specifically we don't re-evaluate expressions?\n\nThanks for highlighting the similarities. I'm feeling better about the\nchoice now.\n\nI've made another pass over the patch and updated a few comments and\nmade a small code change to delay the initialisation of a variable.\n\nI'm pretty happy with this now. If anyone wants to have a look at\nthis, can they do so or let me know they're going to within the next\n24 hours. Otherwise I plan to move into commit mode with it.\n\n> This is afaics slightly cheaper than referencing a variable in a slot.\n\nI guess you must mean cheaper because it means there will be no\nEEOP_*_FETCHSOME step? Otherwise it seems a fairly similar amount of\nwork.\n\nDavid", "msg_date": "Tue, 5 Apr 2022 12:04:18 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": ">\n>\n> I'm pretty happy with this now. 
If anyone wants to have a look at\n> this, can they do so or let me know they're going to within the next\n> 24 hours. Otherwise I plan to move into commit mode with it.\n>\n>\nI just came to the office today to double check this patch. I probably can\nfinish it very soon. But if you are willing to commit it sooner, I am\ntotally\nfine with it.\n\n\n-- \nBest Regards\nAndy Fan\n\n\nI'm pretty happy with this now. If anyone wants to have a look at\nthis, can they do so or let me know they're going to within the next\n24 hours.  Otherwise I plan to move into commit mode with it.\n I just came to the office today to double check this patch.  I probably canfinish it very soon. But if you are willing to commit it sooner,  I am totallyfine with it.  -- Best RegardsAndy Fan", "msg_date": "Tue, 5 Apr 2022 10:35:31 +0800", "msg_from": "Andy Fan <zhihui.fan1213@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On 2022-04-05 12:04:18 +1200, David Rowley wrote:\n> > This is afaics slightly cheaper than referencing a variable in a slot.\n> \n> I guess you must mean cheaper because it means there will be no\n> EEOP_*_FETCHSOME step? Otherwise it seems a fairly similar amount of\n> work.\n\nThat, and slightly fewer indirections for accessing values IIRC.\n\n\n", "msg_date": "Mon, 4 Apr 2022 20:09:57 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "Hi David:\n\n\nI just came to the office today to double check this patch. I probably can\n> finish it very soon.\n>\n\nI would share my current review result first and more review is still in\nprogress.\nThere is a lot of amazing stuff there but I'd save the simple +1 and just\nshare\nsomething I'm not fully understand now. I just focused on the execution\npart and\nonly 1 WindowAgg node situation right now.\n\n1. 
We can do more on PASSTHROUGH, we just bypass the window function\ncurrently, but IIUC we can ignore all of the following tuples in current\npartition\nonce we go into this mode. patch 0001 shows what I mean.\n\n--- without patch 0001, we need 1653 ms for the below query, with the\npatch 0001,\n--- we need 629ms. This is not a serious performance comparison since I\n--- build software with -O0 and --enable_cassert. but it can show some\nimprovement.\npostgres=# explain analyze select * from (select x,y,row_number() over\n(partition\nby x order by y) rn from xy) as xy where rn < 2;\n QUERY\nPLAN\n-------------------------------------------------------------------------------------------------------------------------------------------------------\n Subquery Scan on xy (cost=0.42..55980.43 rows=5000 width=16) (actual\ntime=0.072..1653.631 rows=1000 loops=1)\n Filter: (xy.rn = 1)\n Rows Removed by Filter: 999000\n -> WindowAgg (cost=0.42..43480.43 rows=1000000 width=16) (actual\ntime=0.069..1494.553 rows=1000000 loops=1)\n Run Condition: (row_number() OVER (?) < 2)\n -> Index Only Scan using xy_x_y_idx on xy xy_1\n (cost=0.42..25980.42 rows=1000000 width=8) (actual time=0.047..330.283\nrows=1000000 loops=1)\n Heap Fetches: 0\n Planning Time: 0.240 ms\n Execution Time: 1653.913 ms\n(9 rows)\n\n\npostgres=# explain analyze select * from (select x,y,row_number() over\n(partition\nby x order by y) rn from xy) as xy where rn < 2;\n QUERY\nPLAN\n-------------------------------------------------------------------------------------------------------------------------------------------------------\n Subquery Scan on xy (cost=0.42..55980.43 rows=5000 width=16) (actual\ntime=0.103..629.428 rows=1000 loops=1)\n Filter: (xy.rn < 2)\n Rows Removed by Filter: 1000\n -> WindowAgg (cost=0.42..43480.43 rows=1000000 width=16) (actual\ntime=0.101..628.821 rows=2000 loops=1)\n Run Condition: (row_number() OVER (?) 
< 2)\n -> Index Only Scan using xy_x_y_idx on xy xy_1\n (cost=0.42..25980.42 rows=1000000 width=8) (actual time=0.063..281.715\nrows=1000000 loops=1)\n Heap Fetches: 0\n Planning Time: 1.119 ms\n Execution Time: 629.781 ms\n(9 rows)\n\nTime: 633.241 ms\n\n\n2. the \"Rows Removed by Filter: 1000\" is strange to me for the above\nexample.\n\n Subquery Scan on xy (cost=0.42..55980.43 rows=5000 width=16) (actual\ntime=0.103..629.428 rows=1000 loops=1)\n Filter: (xy.rn < 2)\n Rows Removed by Filter: 1000\n\nThe root cause is even ExecQual(winstate->runcondition, econtext) return\nfalse, we\nstill return the slot to the upper node. A simple hack can avoid it.\n\n3. With the changes in 2, I think we can avoid the subquery node totally\nfor the above query.\n\n4. If all the above are correct, looks the enum WindowAggStatus addition is\nnot a\nmust since we can do what WINDOWAGG_PASSTHROUGH does just when we find it\nis, like\npatch 3 shows. (I leave WINDOWAGG_DONE only, but it can be replaced with\nprevious all_done field).\n\nFinally, Thanks for the patch, it is a good material to study the knowledge\nin this area.\n\n-- \nBest Regards\nAndy Fan", "msg_date": "Tue, 5 Apr 2022 15:38:44 +0800", "msg_from": "Andy Fan <zhihui.fan1213@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": ">\n> The root cause is even ExecQual(winstate->runcondition, econtext) return\n> false, we\n> still return the slot to the upper node. A simple hack can avoid it.\n>\n\nForget to say 0002 shows what I mean.\n\n-- \nBest Regards\nAndy Fan\n\nThe root cause is even ExecQual(winstate->runcondition, econtext) return false, westill return the slot to the upper node.  A simple hack can avoid it.Forget to say 0002 shows what I mean.  
-- Best RegardsAndy Fan", "msg_date": "Tue, 5 Apr 2022 15:40:29 +0800", "msg_from": "Andy Fan <zhihui.fan1213@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Tue, 5 Apr 2022 at 19:38, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n> 1. We can do more on PASSTHROUGH, we just bypass the window function\n> currently, but IIUC we can ignore all of the following tuples in current partition\n> once we go into this mode. patch 0001 shows what I mean.\n\nYeah, there is more performance to be had than even what you've done\nthere. There's no reason really for spool_tuples() to do\ntuplestore_puttupleslot() when we're not in run mode.\n\nThe attached should give slightly more performance. I'm unsure if\nthere's more that can be done for window aggregates, i.e.\neval_windowaggregates()\n\nI'll consider the idea about doing all the filtering in\nnodeWindowAgg.c. For now I made find_window_run_conditions() keep the\nqual so that it's still filtered in the subquery level when there is a\nPARTITION BY clause. Probably the best way would be to make\nnodeWindowAgg.c just loop with a for(;;) loop. I'll need to give it\nmore thought. I'll do that in the morning.\n\nDavid", "msg_date": "Tue, 5 Apr 2022 23:49:15 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Tue, Apr 5, 2022 at 7:49 PM David Rowley <dgrowleyml@gmail.com> wrote:\n\n> On Tue, 5 Apr 2022 at 19:38, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n> > 1. We can do more on PASSTHROUGH, we just bypass the window function\n> > currently, but IIUC we can ignore all of the following tuples in\n> current partition\n> > once we go into this mode. patch 0001 shows what I mean.\n>\n> Yeah, there is more performance to be had than even what you've done\n> there. 
There's no reason really for spool_tuples() to do\n> tuplestore_puttupleslot() when we're not in run mode.\n>\n\nYeah, this is a great idea.\n\nThe attached should give slightly more performance. I'm unsure if\n> there's more that can be done for window aggregates, i.e.\n> eval_windowaggregates()\n>\n> I'll consider the idea about doing all the filtering in\n> nodeWindowAgg.c. For now I made find_window_run_conditions() keep the\n> qual so that it's still filtered in the subquery level when there is a\n> PARTITION BY clause. Probably the best way would be to make\n\nnodeWindowAgg.c just loop with a for(;;) loop. I'll need to give it\n> more thought. I'll do that in the morning.\n>\n>\nI just finished the planner part review and thought about the\nmulti activeWindows\ncases, I think passthrough mode should be still needed but just for multi\nactiveWindow cases, In the passthrough mode, we can not discard the tuples\nin the same partition. Just that PARTITION BY clause should not be the\nrequirement\nfor passthrough mode and we can do such optimization. We can discuss\nmore after your final decision.\n\nAnd I would suggest the below fastpath for this feature.\n\n@@ -2535,7 +2535,7 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo\n*rel,\n * if it happens to reference a window\nfunction. If so then\n * it might be useful to use for the\nWindowAgg's runCondition.\n */\n- if (check_and_push_window_quals(subquery,\nrte, rti, clause))\n+ if (!subquery->hasWindowFuncs ||\ncheck_and_push_window_quals(subquery, rte, rti, clause))\n {\n /*\n * It's not a suitable window run\ncondition qual or it is,\n\n-- \nBest Regards\nAndy Fan\n\nOn Tue, Apr 5, 2022 at 7:49 PM David Rowley <dgrowleyml@gmail.com> wrote:On Tue, 5 Apr 2022 at 19:38, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n> 1. We can do more on PASSTHROUGH, we just bypass the window function\n> currently,  but IIUC we can ignore all of the following tuples in current partition\n> once we go into this mode.  
patch 0001 shows what I mean.\n\nYeah, there is more performance to be had than even what you've done\nthere.  There's no reason really for spool_tuples() to do\ntuplestore_puttupleslot() when we're not in run mode.Yeah, this is a great idea. \nThe attached should give slightly more performance.  I'm unsure if\nthere's more that can be done for window aggregates, i.e.\neval_windowaggregates()\n\nI'll consider the idea about doing all the filtering in\nnodeWindowAgg.c. For now I made find_window_run_conditions() keep the\nqual so that it's still filtered in the subquery level when there is a\nPARTITION BY clause. Probably the best way would be to make\nnodeWindowAgg.c just loop with a for(;;) loop. I'll need to give it\nmore thought. I'll do that in the morning. I just finished the planner part review and thought about the multi activeWindowscases,  I think passthrough mode should be still needed but just for multiactiveWindow cases, In the passthrough mode,  we can not discard the tuplesin the same partition.  Just that PARTITION BY clause should not be the requirementfor passthrough mode and we can do such optimization.  We can discuss more after your final decision. And I would suggest the below fastpath for this feature. @@ -2535,7 +2535,7 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,                                 * if it happens to reference a window function.  If so then                                 * it might be useful to use for the WindowAgg's runCondition.                                 
*/-                               if (check_and_push_window_quals(subquery, rte, rti, clause))+                               if (!subquery->hasWindowFuncs || check_and_push_window_quals(subquery, rte, rti, clause))                                {                                        /*                                         * It's not a suitable window run condition qual or it is,-- Best RegardsAndy Fan", "msg_date": "Tue, 5 Apr 2022 20:59:10 +0800", "msg_from": "Andy Fan <zhihui.fan1213@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Wed, 6 Apr 2022 at 00:59, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n>\n> On Tue, Apr 5, 2022 at 7:49 PM David Rowley <dgrowleyml@gmail.com> wrote:\n>> Yeah, there is more performance to be had than even what you've done\n>> there. There's no reason really for spool_tuples() to do\n>> tuplestore_puttupleslot() when we're not in run mode.\n>\n>\n> Yeah, this is a great idea.\n\nI've attached an updated patch that does most of what you mentioned.\nTo make this work I had to add another state to the WindowAggStatus.\nThis new state is what the top-level WindowAgg will move into when\nthere's a PARTITION BY clause and the run condition becomes false.\nThe new state is named WINDOWAGG_PASSTHROUGH_STRICT, which does all\nthat WINDOWAGG_PASSTHROUGH does plus skips tuplestoring tuples during\nthe spool. We must still spool those tuples when we're not the\ntop-level WindowAgg so that we can send those out to any calling\nWindowAgg nodes. 
They'll need those so they return the correct result.\n\nThis means that for intermediate WindowAgg nodes, when the\nruncondition becomes false, we only skip evaluation of WindowFuncs.\nWindowAgg nodes above us cannot reference these, so there's no need to\nevaluate them, plus, if there's a run condition then these tuples will\nbe filtered out in the final WindowAgg node.\n\nFor the top-level WindowAgg node, when the run condition becomes false\nwe can save quite a bit more work. If there's no PARTITION BY clause,\nthen we're done. Just return NULL. When there is a PARTITION BY\nclause we move into WINDOWAGG_PASSTHROUGH_STRICT which allows us to\nskip both the evaluation of WindowFuncs and also allows us to consume\ntuples from our outer plan until we get a tuple belonging to another\npartition. No need to tuplestore these tuples as they're being\nfiltered out.\n\nSince intermediate WindowAggs cannot filter tuples, all the filtering\nmust occur in the top-level WindowAgg. This cannot be done by way of\nthe run condition as the run condition is special as when it becomes\nfalse, we don't check again to see if it's become true. A sort node\nbetween the WindowAggs can change the tuple order (i.e previously\nmonotonic values may no longer be monotonic) so it's only valid to\nevaluate the run condition that's meant for the WindowAgg node it was\nintended for. To filter out the tuples that don't match the run\ncondition from intermediate WindowAggs in the top-level WindowAgg,\nwhat I've done is introduced quals for WindowAgg nodes. This means\nthat we can now see Filter in EXPLAIN For WindowAgg and \"Rows Removed\nby Filter\".\n\nWhy didn't I just do the filtering in the outer query like was\nhappening before? The problem is that when we push the quals down\ninto the subquery, we don't yet have knowledge of which order that the\nWindowAggs will be evaluated in. 
Only run conditions from\nintermediate WindowAggs will ever make it into the Filter, and we\ndon't know which one the top-level WindowAgg will be until later in\nplanning. To do the filtering in the outer query we'd need to push\nquals back out the subquery again. It seems to me to be easier and\nbetter to filter them out lower down in the plan.\n\nSince the top-level WindowAgg node can now filter tuples, the executor\nnode had to be given a for(;;) loop so that it goes around again for\nanother tuple after it filters a tuple out.\n\nI've also updated the commit message which I think I've made quite\nclear about what we optimise and how it's done.\n\n> And I would suggest the below fastpath for this feature.\n> - if (check_and_push_window_quals(subquery, rte, rti, clause))\n> + if (!subquery->hasWindowFuncs || check_and_push_window_quals(subquery, rte, rti, clause))\n\nGood idea. Thanks!\n\nDavid", "msg_date": "Thu, 7 Apr 2022 14:36:37 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Wed, Apr 6, 2022 at 7:36 PM David Rowley <dgrowleyml@gmail.com> wrote:\n\n> On Wed, 6 Apr 2022 at 00:59, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n> >\n> > On Tue, Apr 5, 2022 at 7:49 PM David Rowley <dgrowleyml@gmail.com>\n> wrote:\n> >> Yeah, there is more performance to be had than even what you've done\n> >> there. 
There's no reason really for spool_tuples() to do\n> >> tuplestore_puttupleslot() when we're not in run mode.\n> >\n> >\n> > Yeah, this is a great idea.\n>\n> I've attached an updated patch that does most of what you mentioned.\n> To make this work I had to add another state to the WindowAggStatus.\n> This new state is what the top-level WindowAgg will move into when\n> there's a PARTITION BY clause and the run condition becomes false.\n> The new state is named WINDOWAGG_PASSTHROUGH_STRICT, which does all\n> that WINDOWAGG_PASSTHROUGH does plus skips tuplestoring tuples during\n> the spool. We must still spool those tuples when we're not the\n> top-level WindowAgg so that we can send those out to any calling\n> WindowAgg nodes. They'll need those so they return the correct result.\n>\n> This means that for intermediate WindowAgg nodes, when the\n> runcondition becomes false, we only skip evaluation of WindowFuncs.\n> WindowAgg nodes above us cannot reference these, so there's no need to\n> evaluate them, plus, if there's a run condition then these tuples will\n> be filtered out in the final WindowAgg node.\n>\n> For the top-level WindowAgg node, when the run condition becomes false\n> we can save quite a bit more work. If there's no PARTITION BY clause,\n> then we're done. Just return NULL. When there is a PARTITION BY\n> clause we move into WINDOWAGG_PASSTHROUGH_STRICT which allows us to\n> skip both the evaluation of WindowFuncs and also allows us to consume\n> tuples from our outer plan until we get a tuple belonging to another\n> partition. No need to tuplestore these tuples as they're being\n> filtered out.\n>\n> Since intermediate WindowAggs cannot filter tuples, all the filtering\n> must occur in the top-level WindowAgg. This cannot be done by way of\n> the run condition as the run condition is special as when it becomes\n> false, we don't check again to see if it's become true. 
A sort node\n> between the WindowAggs can change the tuple order (i.e previously\n> monotonic values may no longer be monotonic) so it's only valid to\n> evaluate the run condition that's meant for the WindowAgg node it was\n> intended for. To filter out the tuples that don't match the run\n> condition from intermediate WindowAggs in the top-level WindowAgg,\n> what I've done is introduced quals for WindowAgg nodes. This means\n> that we can now see Filter in EXPLAIN For WindowAgg and \"Rows Removed\n> by Filter\".\n>\n> Why didn't I just do the filtering in the outer query like was\n> happening before? The problem is that when we push the quals down\n> into the subquery, we don't yet have knowledge of which order that the\n> WindowAggs will be evaluated in. Only run conditions from\n> intermediate WindowAggs will ever make it into the Filter, and we\n> don't know which one the top-level WindowAgg will be until later in\n> planning. To do the filtering in the outer query we'd need to push\n> quals back out the subquery again. It seems to me to be easier and\n> better to filter them out lower down in the plan.\n>\n> Since the top-level WindowAgg node can now filter tuples, the executor\n> node had to be given a for(;;) loop so that it goes around again for\n> another tuple after it filters a tuple out.\n>\n> I've also updated the commit message which I think I've made quite\n> clear about what we optimise and how it's done.\n>\n> > And I would suggest the below fastpath for this feature.\n> > - if\n> (check_and_push_window_quals(subquery, rte, rti, clause))\n> > + if (!subquery->hasWindowFuncs ||\n> check_and_push_window_quals(subquery, rte, rti, clause))\n>\n> Good idea. 
Thanks!\n>\n> David\n>\nHi,\n\n+ * We must keep the original qual in place if there is a\n+ * PARTITION BY clause as the top-level WindowAgg remains in\n+ * pass-through mode and does nothing to filter out unwanted\n+ * tuples.\n+ */\n+ *keep_original = false;\n\nThe comment talks about keeping original qual but the assignment uses the\nvalue false.\nMaybe the comment can be rephrased so that it matches the assignment.\n\nCheers\n\nOn Wed, Apr 6, 2022 at 7:36 PM David Rowley <dgrowleyml@gmail.com> wrote:On Wed, 6 Apr 2022 at 00:59, Andy Fan <zhihui.fan1213@gmail.com> wrote:\n>\n> On Tue, Apr 5, 2022 at 7:49 PM David Rowley <dgrowleyml@gmail.com> wrote:\n>> Yeah, there is more performance to be had than even what you've done\n>> there.  There's no reason really for spool_tuples() to do\n>> tuplestore_puttupleslot() when we're not in run mode.\n>\n>\n> Yeah, this is a great idea.\n\nI've attached an updated patch that does most of what you mentioned.\nTo make this work I had to add another state to the WindowAggStatus.\nThis new state is what the top-level WindowAgg will move into when\nthere's a PARTITION BY clause and the run condition becomes false.\nThe new state is named WINDOWAGG_PASSTHROUGH_STRICT, which does all\nthat WINDOWAGG_PASSTHROUGH does plus skips tuplestoring tuples during\nthe spool.  We must still spool those tuples when we're not the\ntop-level WindowAgg so that we can send those out to any calling\nWindowAgg nodes. They'll need those so they return the correct result.\n\nThis means that for intermediate WindowAgg nodes, when the\nruncondition becomes false, we only skip evaluation of WindowFuncs.\nWindowAgg nodes above us cannot reference these, so there's no need to\nevaluate them, plus, if there's a run condition then these tuples will\nbe filtered out in the final WindowAgg node.\n\nFor the top-level WindowAgg node, when the run condition becomes false\nwe can save quite a bit more work. If there's no PARTITION BY clause,\nthen we're done. 
Just return NULL.  When there is a PARTITION BY\nclause we move into WINDOWAGG_PASSTHROUGH_STRICT which allows us to\nskip both the evaluation of WindowFuncs and also allows us to consume\ntuples from our outer plan until we get a tuple belonging to another\npartition.  No need to tuplestore these tuples as they're being\nfiltered out.\n\nSince intermediate WindowAggs cannot filter tuples, all the filtering\nmust occur in the top-level WindowAgg.  This cannot be done by way of\nthe run condition as the run condition is special as when it becomes\nfalse, we don't check again to see if it's become true.  A sort node\nbetween the WindowAggs can change the tuple order (i.e previously\nmonotonic values may no longer be monotonic) so it's only valid to\nevaluate the run condition that's meant for the WindowAgg node it was\nintended for.  To filter out the tuples that don't match the run\ncondition from intermediate WindowAggs in the top-level WindowAgg,\nwhat I've done is introduced quals for WindowAgg nodes.  This means\nthat we can now see Filter in EXPLAIN For WindowAgg and \"Rows Removed\nby Filter\".\n\nWhy didn't I just do the filtering in the outer query like was\nhappening before?  The problem is that when we push the quals down\ninto the subquery, we don't yet have knowledge of which order that the\nWindowAggs will be evaluated in.  Only run conditions from\nintermediate WindowAggs will ever make it into the Filter, and we\ndon't know which one the top-level WindowAgg will be until later in\nplanning. To do the filtering in the outer query we'd need to push\nquals back out the subquery again. 
It seems to me to be easier and\nbetter to filter them out lower down in the plan.\n\nSince the top-level WindowAgg node can now filter tuples, the executor\nnode had to be given a for(;;) loop so that it goes around again for\nanother tuple after it filters a tuple out.\n\nI've also updated the commit message which I think I've made quite\nclear about what we optimise and how it's done.\n\n> And I would suggest the below fastpath for this feature.\n> -                               if (check_and_push_window_quals(subquery, rte, rti, clause))\n> +                               if (!subquery->hasWindowFuncs || check_and_push_window_quals(subquery, rte, rti, clause))\n\nGood idea. Thanks!\n\nDavidHi,+                * We must keep the original qual in place if there is a+                * PARTITION BY clause as the top-level WindowAgg remains in+                * pass-through mode and does nothing to filter out unwanted+                * tuples.+                */+               *keep_original = false;The comment talks about keeping original qual but the assignment uses the value false.Maybe the comment can be rephrased so that it matches the assignment.Cheers", "msg_date": "Wed, 6 Apr 2022 20:45:56 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Thu, 7 Apr 2022 at 15:41, Zhihong Yu <zyu@yugabyte.com> wrote:\n> + * We must keep the original qual in place if there is a\n> + * PARTITION BY clause as the top-level WindowAgg remains in\n> + * pass-through mode and does nothing to filter out unwanted\n> + * tuples.\n> + */\n> + *keep_original = false;\n>\n> The comment talks about keeping original qual but the assignment uses the value false.\n> Maybe the comment can be rephrased so that it matches the assignment.\n\nThanks. I've just removed that comment locally now. 
You're right, it\nwas out of date.\n\nDavid\n\n\n", "msg_date": "Thu, 7 Apr 2022 19:01:13 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Thu, 7 Apr 2022 at 19:01, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Thu, 7 Apr 2022 at 15:41, Zhihong Yu <zyu@yugabyte.com> wrote:\n> > + * We must keep the original qual in place if there is a\n> > + * PARTITION BY clause as the top-level WindowAgg remains in\n> > + * pass-through mode and does nothing to filter out unwanted\n> > + * tuples.\n> > + */\n> > + *keep_original = false;\n> >\n> > The comment talks about keeping original qual but the assignment uses the value false.\n> > Maybe the comment can be rephrased so that it matches the assignment.\n>\n> Thanks. I've just removed that comment locally now. You're right, it\n> was out of date.\n\nI've attached the updated patch with the fixed comment and a few other\ncomments reworded slightly.\n\nI've also pgindented the patch.\n\nBarring any objection, I'm planning to push this one in around 10 hours time.\n\nDavid", "msg_date": "Fri, 8 Apr 2022 02:11:06 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Thu, Apr 7, 2022 at 7:11 AM David Rowley <dgrowleyml@gmail.com> wrote:\n\n> On Thu, 7 Apr 2022 at 19:01, David Rowley <dgrowleyml@gmail.com> wrote:\n> >\n> > On Thu, 7 Apr 2022 at 15:41, Zhihong Yu <zyu@yugabyte.com> wrote:\n> > > + * We must keep the original qual in place if there is\n> a\n> > > + * PARTITION BY clause as the top-level WindowAgg\n> remains in\n> > > + * pass-through mode and does nothing to filter out\n> unwanted\n> > > + * tuples.\n> > > + */\n> > > + *keep_original = false;\n> > >\n> > > The comment talks about keeping original qual but the assignment uses\n> the value false.\n> > > Maybe the comment can be rephrased so that it 
matches the assignment.\n> >\n> > Thanks. I've just removed that comment locally now. You're right, it\n> > was out of date.\n>\n> I've attached the updated patch with the fixed comment and a few other\n> comments reworded slightly.\n>\n> I've also pgindented the patch.\n>\n> Barring any objection, I'm planning to push this one in around 10 hours\n> time.\n>\n> David\n>\nHi,\n\n+ WINDOWAGG_PASSTHROUGH_STRICT /* Pass-through plus don't store new\n+ * tuples during spool */\n\nI think the comment in code is illustrative:\n\n+ * STRICT pass-through mode is required for the top\nwindow\n+ * when there is a PARTITION BY clause. Otherwise we\nmust\n+ * ensure we store tuples that don't match the\n+ * runcondition so they're available to WindowAggs\nabove.\n\nIf you think the above is too long where WINDOWAGG_PASSTHROUGH_STRICT is\ndefined, maybe point to the longer version so that people can find that\nmore easily.\n\nCheers\n\nOn Thu, Apr 7, 2022 at 7:11 AM David Rowley <dgrowleyml@gmail.com> wrote:On Thu, 7 Apr 2022 at 19:01, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Thu, 7 Apr 2022 at 15:41, Zhihong Yu <zyu@yugabyte.com> wrote:\n> > +                * We must keep the original qual in place if there is a\n> > +                * PARTITION BY clause as the top-level WindowAgg remains in\n> > +                * pass-through mode and does nothing to filter out unwanted\n> > +                * tuples.\n> > +                */\n> > +               *keep_original = false;\n> >\n> > The comment talks about keeping original qual but the assignment uses the value false.\n> > Maybe the comment can be rephrased so that it matches the assignment.\n>\n> Thanks. I've just removed that comment locally now. 
You're right, it\n> was out of date.\n\nI've attached the updated patch with the fixed comment and a few other\ncomments reworded slightly.\n\nI've also pgindented the patch.\n\nBarring any objection, I'm planning to push this one in around 10 hours time.\n\nDavidHi,+   WINDOWAGG_PASSTHROUGH_STRICT    /* Pass-through plus don't store new+                                    * tuples during spool */I think the comment in code is illustrative:+                    * STRICT pass-through mode is required for the top window+                    * when there is a PARTITION BY clause.  Otherwise we must+                    * ensure we store tuples that don't match the+                    * runcondition so they're available to WindowAggs above. If you think the above is too long where WINDOWAGG_PASSTHROUGH_STRICT is defined, maybe point to the longer version so that people can find that more easily.Cheers", "msg_date": "Thu, 7 Apr 2022 15:08:10 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": false, "msg_subject": "Re: Window Function \"Run Conditions\"" }, { "msg_contents": "On Fri, 8 Apr 2022 at 02:11, David Rowley <dgrowleyml@gmail.com> wrote:\n> Barring any objection, I'm planning to push this one in around 10 hours time.\n\nPushed. 9d9c02ccd\n\nThank you all for the reviews.\n\nDavid\n\n\n", "msg_date": "Fri, 8 Apr 2022 10:40:37 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Window Function \"Run Conditions\"" } ]
[ { "msg_contents": "Hello.\n\nWhile I looked a patch, I found that the following ECPG statement\ngenerates uncompilable .c source.\n\nEXEC SQL CREATE TABLE t AS stmt;\n\necpgtest.c:\n#line 42 \"ecpgtest.pgc\"\n\n\t\tprintf(\"1:dbname=%s\\n\", dbname);\n\t\t{ ECPGdo(__LINE__, 0, 1, NULL, 0, ECPGst_execute, create table t as execute \"stmt\", ECPGt_EOIT, ECPGt_EORT);\n\nThis is apparently broken. The cause is that the rule ExecutStmt is\nassumed to return a statement name when type is empty (or null), while\nit actually returns a full statement for the CREATE TABLE AS EXECUTE\nsyntax.\n\nSeparating \"CREATE TABLE AS EXECUTE\" from ExecuteStmt would be cleaner\nbut I avoided to change the syntax tree. Instead the attched make\ndistinction of $$.type of ExecuteStmt between NULL and \"\" to use to\nnotify the returned name is name of a prepared statement or a full\nstatement.\n\nI'll post the test part later.\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center", "msg_date": "Thu, 01 Jul 2021 18:45:25 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": true, "msg_subject": "ECPG doesn't compile CREATE AS EXECUTE properly." }, { "msg_contents": "At Thu, 01 Jul 2021 18:45:25 +0900 (JST), Kyotaro Horiguchi <horikyota.ntt@gmail.com> wrote in \n> I'll post the test part later.\n\nA version incluedes the test part.\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center", "msg_date": "Fri, 02 Jul 2021 13:01:11 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ECPG doesn't compile CREATE AS EXECUTE properly." }, { "msg_contents": "On Thu, Jul 01, 2021 at 06:45:25PM +0900, Kyotaro Horiguchi wrote:\n> Separating \"CREATE TABLE AS EXECUTE\" from ExecuteStmt would be cleaner\n> but I avoided to change the syntax tree. 
Instead the attched make\n> distinction of $$.type of ExecuteStmt between NULL and \"\" to use to\n> notify the returned name is name of a prepared statement or a full\n> statement.\n\nI am not so sure, and using an empty string makes the code a bit\nharder to follow. How would that look with the grammar split you have\nin mind? Maybe that makes the code more consistent with the PREPARE\nblock a couple of lines above?\n--\nMichael", "msg_date": "Tue, 6 Jul 2021 11:17:47 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG doesn't compile CREATE AS EXECUTE properly." }, { "msg_contents": "Thanks for the comment.\n\nAt Tue, 6 Jul 2021 11:17:47 +0900, Michael Paquier <michael@paquier.xyz> wrote in \n> On Thu, Jul 01, 2021 at 06:45:25PM +0900, Kyotaro Horiguchi wrote:\n> > Separating \"CREATE TABLE AS EXECUTE\" from ExecuteStmt would be cleaner\n> > but I avoided to change the syntax tree. Instead the attched make\n> > distinction of $$.type of ExecuteStmt between NULL and \"\" to use to\n> > notify the returned name is name of a prepared statement or a full\n> > statement.\n> \n> I am not so sure, and using an empty string makes the code a bit\n> harder to follow. How would that look with the grammar split you have\n\nI agree to that.\n\n> in mind? 
Maybe that makes the code more consistent with the PREPARE\n> block a couple of lines above?\n\nMore accurately, I didn't come up with the way to split out some of\nthe rule-components in a rule out as another rule using the existing\ninfrastructure.\n\npreproc.y:\n\n ExecuteStmt:\nEXECUTE prepared_name execute_param_clause execute_rest\n\t{}\n| CREATE OptTemp TABLE create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest\n\t{}\n| CREATE OptTemp TABLE IF_P NOT EXISTS create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest\n\t{}\n;\n\nI can directly edit this as the following:\n\n ExecuteStmt:\nEXECUTE prepared_name execute_param_clause execute_rest\n\t{}\n;\n\nCreateExecuteStmt:\n| CREATE OptTemp TABLE create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest\n\t{}\n| CREATE OptTemp TABLE IF_P NOT EXISTS create_as_target AS EXECUTE prepared_name execute_param_clause opt_with_data execute_rest\n\t{}\n;\n\nThen add the following component to the rule \"stmt\".\n\n| CreateExecuteStmt:\n { output_statement(..., ECPGst_normal); }\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Tue, 06 Jul 2021 17:47:34 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": true, "msg_subject": "Re: ECPG doesn't compile CREATE AS EXECUTE properly." }, { "msg_contents": "On Tue, Jul 06, 2021 at 05:47:34PM +0900, Kyotaro Horiguchi wrote:\n> More accurately, I didn't come up with the way to split out some of\n> the rule-components in a rule out as another rule using the existing\n> infrastructure.\n>\n> [...]\n> \n> Then add the following component to the rule \"stmt\".\n\nI see. That sounds interesting as solution, and consistent with the\nexisting cursor queries. The ECPG parser is not that advanced, and we\nmay not really need to make it more complicated with sub-clause\nhandling like INEs. 
So I'd be rather in favor of what you are\ndescribing here.\n--\nMichael", "msg_date": "Tue, 6 Jul 2021 20:10:15 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: ECPG doesn't compile CREATE AS EXECUTE properly." } ]
[ { "msg_contents": "Quick patch to add mention of the need for compiling with\n--enable-tap-tests on the TAP section page\n\nhttps://www.postgresql.org/docs/current/regress-tap.html\n\nSearching about the TAP tests often leads to this page, but there is no\neasy link or mention of the fact that the sample invocations will not work\nwithout the special config flag.\n\nCheers,\nGreg", "msg_date": "Thu, 1 Jul 2021 10:03:10 -0400", "msg_from": "Greg Sabino Mullane <htamfids@gmail.com>", "msg_from_op": true, "msg_subject": "Mention --enable-tap-tests in the TAP section page" }, { "msg_contents": "On Thu, Jul 01, 2021 at 10:03:10AM -0400, Greg Sabino Mullane wrote:\n> Searching about the TAP tests often leads to this page, but there is no\n> easy link or mention of the fact that the sample invocations will not work\n> without the special config flag.\n\nThis is mentioned on a different page, \"Running the Tests\", but for\nthe set of extra tests. Adding an extra reference on this page is a\ngood idea.\n--\nMichael", "msg_date": "Fri, 2 Jul 2021 10:53:07 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Mention --enable-tap-tests in the TAP section page" }, { "msg_contents": "\nOn 7/1/21 9:53 PM, Michael Paquier wrote:\n> On Thu, Jul 01, 2021 at 10:03:10AM -0400, Greg Sabino Mullane wrote:\n>> Searching about the TAP tests often leads to this page, but there is no\n>> easy link or mention of the fact that the sample invocations will not work\n>> without the special config flag.\n> This is mentioned on a different page, \"Running the Tests\", but for\n> the set of extra tests. 
Adding an extra reference on this page is a\n> good idea.\n\n\n\n\nAgreed.\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Fri, 2 Jul 2021 09:52:10 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: Mention --enable-tap-tests in the TAP section page" }, { "msg_contents": "On Fri, Jul 02, 2021 at 09:52:10AM -0400, Andrew Dunstan wrote:\n> Agreed.\n\nApplied.\n--\nMichael", "msg_date": "Sun, 4 Jul 2021 21:00:37 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Mention --enable-tap-tests in the TAP section page" } ]
[ { "msg_contents": "Hi,\n\nThe function FreePageManagerPutInternal can access an uninitialized\nvariable,\nif the following conditions occur:\n\n1. fpm->btree_depth != 0\n2. relptr_off == 0 inside function (FreePageBtreeSearch)\n\nPerhaps this is a rare situation, but I think it's worth preventing.\n\n/* Search the btree. */\nFreePageBtreeSearch(fpm, first_page, &result);\nAssert(!result.found);\nif (result.index > 0) /* result.index is garbage or invalid here) */\n\nregards,\nRanier Vilela", "msg_date": "Thu, 1 Jul 2021 16:42:41 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Fix uninitialized variable access (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "On Fri, 2 Jul 2021 at 01:13, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> Hi,\n>\n> The function FreePageManagerPutInternal can access an uninitialized\nvariable,\n> if the following conditions occur:\n\nPatch looks good to me.\n\n> 1. fpm->btree_depth != 0\n> 2. relptr_off == 0 inside function (FreePageBtreeSearch)\n>\n> Perhaps this is a rare situation, but I think it's worth preventing.\n\nPlease can we try to hit this rare condition by any test case. If you have\nany test cases, please share.\n\n1064 FreePageBtreeSearch(FreePageManager *fpm, Size first_page,\n\n\n1065 FreePageBtreeSearchResult *result)\n\n1066 {\n\n1067 char *base = fpm_segment_base(fpm);\n\n1068 FreePageBtree *btp = relptr_access(base, fpm->btree_root);\n\n1069 Size index;\n\n1070\n\n1071 result->split_pages = 1;\n\n1072\n\n1073 /* If the btree is empty, there's nothing to find. */\n\n1074 if (*btp == NULL*)\n\n1075 {\n\n1076 result->page = NULL;\n\n1077 result->found = false;\n\n1078 return;\n\n1079 }\n\n>\n> /* Search the btree. 
*/\n> FreePageBtreeSearch(fpm, first_page, &result);\n> Assert(!result.found);\n> if (result.index > 0) /* result.index is garbage or invalid here) */\n>\n> regards,\n> Ranier Vilela\n\n\n-- \nThanks and Regards\nMahendra Singh Thalor\nEnterpriseDB: http://www.enterprisedb.com\n\nOn Fri, 2 Jul 2021 at 01:13, Ranier Vilela <ranier.vf@gmail.com> wrote:>> Hi,>> The function FreePageManagerPutInternal can access an uninitialized variable,> if the following conditions occur:Patch looks good to me.> 1. fpm->btree_depth != 0> 2. relptr_off == 0 inside function (FreePageBtreeSearch)>> Perhaps this is a rare situation, but I think it's worth preventing.Please can we try to hit this rare condition by any test case. If you have any test cases, please share.1064 FreePageBtreeSearch(FreePageManager *fpm, Size first_page,                                                                                                                  1065                     FreePageBtreeSearchResult *result)                          1066 {                                                                               1067     char       *base = fpm_segment_base(fpm);                                   1068     FreePageBtree *btp = relptr_access(base, fpm->btree_root);                  1069     Size        index;                                                          1070                                                                                 1071     result->split_pages = 1;                                                    1072                                                                                 1073     /* If the btree is empty, there's nothing to find. 
*/                       1074     if (btp == NULL)                                                            1075     {                                                                           1076         result->page = NULL;                                                    1077         result->found = false;                                                  1078         return;                                                                 1079     }  >> /* Search the btree. */> FreePageBtreeSearch(fpm, first_page, &result);> Assert(!result.found);> if (result.index > 0)   /* result.index is garbage or invalid here) */>> regards,> Ranier Vilela-- Thanks and RegardsMahendra Singh ThalorEnterpriseDB: http://www.enterprisedb.com", "msg_date": "Fri, 2 Jul 2021 01:49:58 +0530", "msg_from": "Mahendra Singh Thalor <mahi6run@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "Em qui., 1 de jul. de 2021 às 17:20, Mahendra Singh Thalor <\nmahi6run@gmail.com> escreveu:\n\n> On Fri, 2 Jul 2021 at 01:13, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> >\n> > Hi,\n> >\n> > The function FreePageManagerPutInternal can access an uninitialized\n> variable,\n> > if the following conditions occur:\n>\n> Patch looks good to me.\n>\n> > 1. fpm->btree_depth != 0\n> > 2. relptr_off == 0 inside function (FreePageBtreeSearch)\n> >\n> > Perhaps this is a rare situation, but I think it's worth preventing.\n>\n> Please can we try to hit this rare condition by any test case. If you have\n> any test cases, please share.\n>\nAdded to Commitfest (https://commitfest.postgresql.org/34/3236/), so we\ndon't forget.\n\nregards,\nRanier Vilela\n\n>\n\nEm qui., 1 de jul. 
de 2021 às 17:20, Mahendra Singh Thalor <mahi6run@gmail.com> escreveu:On Fri, 2 Jul 2021 at 01:13, Ranier Vilela <ranier.vf@gmail.com> wrote:>> Hi,>> The function FreePageManagerPutInternal can access an uninitialized variable,> if the following conditions occur:Patch looks good to me.> 1. fpm->btree_depth != 0> 2. relptr_off == 0 inside function (FreePageBtreeSearch)>> Perhaps this is a rare situation, but I think it's worth preventing.Please can we try to hit this rare condition by any test case. If you have any test cases, please share.Added to Commitfest (https://commitfest.postgresql.org/34/3236/), so we don't forget. regards,Ranier Vilela", "msg_date": "Fri, 2 Jul 2021 18:22:56 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "On Fri, Jul 02, 2021 at 06:22:56PM -0300, Ranier Vilela wrote:\n> Em qui., 1 de jul. de 2021 às 17:20, Mahendra Singh Thalor <\n> mahi6run@gmail.com> escreveu:\n>> Please can we try to hit this rare condition by any test case. If you have\n>> any test cases, please share.\n\nYeah, this needs to be proved. Are you sure that this change is\nactually right? The bottom of FreePageManagerPutInternal() has\nassumptions that a page may not be found during a btree search, with\nan index value used.\n--\nMichael", "msg_date": "Tue, 17 Aug 2021 17:04:44 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "Em ter., 17 de ago. de 2021 às 05:04, Michael Paquier <michael@paquier.xyz>\nescreveu:\n\n> On Fri, Jul 02, 2021 at 06:22:56PM -0300, Ranier Vilela wrote:\n> > Em qui., 1 de jul. de 2021 às 17:20, Mahendra Singh Thalor <\n> > mahi6run@gmail.com> escreveu:\n> >> Please can we try to hit this rare condition by any test case. 
If you\n> have\n> >> any test cases, please share.\n>\n> Yeah, this needs to be proved.\n\nDue to the absolute lack of reports, I believe that this particular case\nnever happened.\n\n Are you sure that this change is\n> actually right?\n\nYes, have.\n\n The bottom of FreePageManagerPutInternal() has\n> assumptions that a page may not be found during a btree search, with\n> an index value used.\n>\nAssert assumptions are for Debug.\nIf that's conditions happen, all *result.index* touches are garbage.\n\nregards,\nRanier Vilela\n\nEm ter., 17 de ago. de 2021 às 05:04, Michael Paquier <michael@paquier.xyz> escreveu:On Fri, Jul 02, 2021 at 06:22:56PM -0300, Ranier Vilela wrote:\n> Em qui., 1 de jul. de 2021 às 17:20, Mahendra Singh Thalor <\n> mahi6run@gmail.com> escreveu:\n>> Please can we try to hit this rare condition by any test case. If you have\n>> any test cases, please share.\n\nYeah, this needs to be proved.Due to the absolute lack of reports, I believe that this particular case never happened.   Are you sure that this change is\nactually right?Yes, have.   
The bottom of FreePageManagerPutInternal() has\nassumptions that a page may not be found during a btree search, with\nan index value used.Assert assumptions are for Debug.If that's conditions happen, all *result.index* touches are garbage.regards,Ranier Vilela", "msg_date": "Tue, 17 Aug 2021 08:13:35 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "On Tue, Aug 17, 2021 at 9:13 PM Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> If that's conditions happen, all *result.index* touches are garbage.\n>\n\nThe patch looks valid to me, as the \"index\" member is not set in the\n\"btp == NULL\" case, and so has a junk value in the caller, and it's\nbeing used to index an array,\nBUT - isn't it also necessary to set the \"split_pages\" member to 0,\nbecause it also is not currently being set, and so too will have a\njunk value in this case (and it's possible for it to be referenced by\nthe caller in this case).\nThe \"btp == NULL\" case is not hit by any existing test cases, and does\nseem to be a rare case.\n\n\nRegards,\nGreg Nancarrow\nFujitsu Australia\n\n\n", "msg_date": "Tue, 17 Aug 2021 23:22:18 +1000", "msg_from": "Greg Nancarrow <gregn4422@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "Em ter., 17 de ago. 
de 2021 às 10:22, Greg Nancarrow <gregn4422@gmail.com>\nescreveu:\n\n> On Tue, Aug 17, 2021 at 9:13 PM Ranier Vilela <ranier.vf@gmail.com> wrote:\n> >\n> > If that's conditions happen, all *result.index* touches are garbage.\n> >\n>\n> The patch looks valid to me, as the \"index\" member is not set in the\n> \"btp == NULL\" case, and so has a junk value in the caller, and it's\n> being used to index an array,\n> BUT - isn't it also necessary to set the \"split_pages\" member to 0,\n> because it also is not currently being set, and so too will have a\n> junk value in this case (and it's possible for it to be referenced by\n> the caller in this case).\n>\nI agree.\n\nAttached new version (v1).\n\nregards,\nRanier Vilela", "msg_date": "Tue, 17 Aug 2021 11:27:42 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "At Tue, 17 Aug 2021 17:04:44 +0900, Michael Paquier <michael@paquier.xyz> wrote in \n> On Fri, Jul 02, 2021 at 06:22:56PM -0300, Ranier Vilela wrote:\n> > Em qui., 1 de jul. de 2021 às 17:20, Mahendra Singh Thalor <\n> > mahi6run@gmail.com> escreveu:\n> >> Please can we try to hit this rare condition by any test case. If you have\n> >> any test cases, please share.\n> \n> Yeah, this needs to be proved. Are you sure that this change is\n> actually right? The bottom of FreePageManagerPutInternal() has\n> assumptions that a page may not be found during a btree search, with\n> an index value used.\n\nBy a quick look, FreePageBtreeSearch is called only from\nFreePageManagerPutInternal at three points. The first one assumes that\nresult.found == true, at the rest points are passed only when\nfpm->btree_depth > 0, i.e, fpm->btree_root is non-NULL.\n\nIn short FreePageBtreeSeach is never called when fpm->btree_root is\nNULL. 
I don't think we need to fill-in other members since the\ncontract of the function looks fine.\n\nIt might be simpler to turn 'if (btp == NULL)' to an assertion.\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Wed, 18 Aug 2021 17:29:58 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "On Wed, Aug 18, 2021 at 6:30 PM Kyotaro Horiguchi\n<horikyota.ntt@gmail.com> wrote:\n>\n> By a quick look, FreePageBtreeSearch is called only from\n> FreePageManagerPutInternal at three points. The first one assumes that\n> result.found == true, at the rest points are passed only when\n> fpm->btree_depth > 0, i.e, fpm->btree_root is non-NULL.\n>\n> In short FreePageBtreeSeach is never called when fpm->btree_root is\n> NULL. I don't think we need to fill-in other members since the\n> contract of the function looks fine.\n>\n> It might be simpler to turn 'if (btp == NULL)' to an assertion.\n>\n\nEven if there are no current calls to FreePageBtreeSeach() where it\nresults in \"btp == NULL\", FreePageBtreeSeach() is obviously handling\nthe possibility of that condition, and I think it's poor form to\nreturn with two uninitialized members in the result for that,\nespecially when the current code for the \"!result.found\" case can\nreference those members, and the usual return point of\nFreePageBtreeSeach() has all result members set, including\nresult.found==true and result.found=false cases.\nAt best it's inconsistent and confusing and it looks like a bug\nwaiting to happen, so I'm still in favor of the patch.\n\nRegards,\nGreg Nancarrow\nFujitsu Australia\n\n\n", "msg_date": "Wed, 18 Aug 2021 20:53:31 +1000", "msg_from": "Greg Nancarrow <gregn4422@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { 
"msg_contents": "Em qua., 18 de ago. de 2021 às 05:30, Kyotaro Horiguchi <\nhorikyota.ntt@gmail.com> escreveu:\n\n> At Tue, 17 Aug 2021 17:04:44 +0900, Michael Paquier <michael@paquier.xyz>\n> wrote in\n> > On Fri, Jul 02, 2021 at 06:22:56PM -0300, Ranier Vilela wrote:\n> > > Em qui., 1 de jul. de 2021 às 17:20, Mahendra Singh Thalor <\n> > > mahi6run@gmail.com> escreveu:\n> > >> Please can we try to hit this rare condition by any test case. If you\n> have\n> > >> any test cases, please share.\n> >\n> > Yeah, this needs to be proved. Are you sure that this change is\n> > actually right? The bottom of FreePageManagerPutInternal() has\n> > assumptions that a page may not be found during a btree search, with\n> > an index value used.\n>\n> By a quick look, FreePageBtreeSearch is called only from\n> FreePageManagerPutInternal at three points. The first one assumes that\n> result.found == true, at the rest points are passed only when\n> fpm->btree_depth > 0, i.e, fpm->btree_root is non-NULL.\n>\nIn short, it's a failure ready to happen, just someone who trusts\nFreePageBtreeSearch will do the right thing,\nlike not leaving structure with uninitialized fields.\n\n\n> In short FreePageBtreeSeach is never called when fpm->btree_root is\n> NULL. I don't think we need to fill-in other members since the\n> contract of the function looks fine.\n>\nQuite the contrary, the contract is not being fulfilled.\n\n\n> It might be simpler to turn 'if (btp == NULL)' to an assertion.\n>\nAre you sure that no condition will ever occur in production?\nAssertion is not for mistakes that can happen.\n\nregards,\nRanier Vilela\n\nEm qua., 18 de ago. de 2021 às 05:30, Kyotaro Horiguchi <horikyota.ntt@gmail.com> escreveu:At Tue, 17 Aug 2021 17:04:44 +0900, Michael Paquier <michael@paquier.xyz> wrote in \n> On Fri, Jul 02, 2021 at 06:22:56PM -0300, Ranier Vilela wrote:\n> > Em qui., 1 de jul. 
de 2021 às 17:20, Mahendra Singh Thalor <\n> > mahi6run@gmail.com> escreveu:\n> >> Please can we try to hit this rare condition by any test case. If you have\n> >> any test cases, please share.\n> \n> Yeah, this needs to be proved.  Are you sure that this change is\n> actually right?  The bottom of FreePageManagerPutInternal() has\n> assumptions that a page may not be found during a btree search, with\n> an index value used.\n\nBy a quick look, FreePageBtreeSearch is called only from\nFreePageManagerPutInternal at three points. The first one assumes that\nresult.found == true, at the rest points are passed only when\nfpm->btree_depth > 0, i.e, fpm->btree_root is non-NULL.In short, it's a failure ready to happen, just someone who trusts FreePageBtreeSearch will do the right thing, like not leaving structure with uninitialized fields.\n\nIn short FreePageBtreeSeach is never called when fpm->btree_root is\nNULL.  I don't think we need to fill-in other members since the\ncontract of the function looks fine.Quite the contrary, the contract is not being fulfilled. \n\nIt might be simpler to turn 'if (btp == NULL)' to an assertion.Are you sure that no condition will ever occur in production?Assertion is not for mistakes that can happen.regards,Ranier Vilela", "msg_date": "Wed, 18 Aug 2021 08:06:18 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "On 2021-08-18 1:29 a.m., Kyotaro Horiguchi wrote:\n> At Tue, 17 Aug 2021 17:04:44 +0900, Michael Paquier <michael@paquier.xyz> wrote in\n>> On Fri, Jul 02, 2021 at 06:22:56PM -0300, Ranier Vilela wrote:\n>>> Em qui., 1 de jul. de 2021 às 17:20, Mahendra Singh Thalor <\n>>> mahi6run@gmail.com> escreveu:\n>>>> Please can we try to hit this rare condition by any test case. If you have\n>>>> any test cases, please share.\n>> Yeah, this needs to be proved. 
Are you sure that this change is\n>> actually right? The bottom of FreePageManagerPutInternal() has\n>> assumptions that a page may not be found during a btree search, with\n>> an index value used.\n> By a quick look, FreePageBtreeSearch is called only from\n> FreePageManagerPutInternal at three points. The first one assumes that\n> result.found == true, at the rest points are passed only when\n> fpm->btree_depth > 0, i.e, fpm->btree_root is non-NULL.\n>\n> In short FreePageBtreeSeach is never called when fpm->btree_root is\n> NULL. I don't think we need to fill-in other members since the\n> contract of the function looks fine.\n>\n> It might be simpler to turn 'if (btp == NULL)' to an assertion.\nAfter added the initialization of split_pages in patch \nfix_unitialized_var_index_freepage-v1.patch,\n\n+        result->split_pages = 0;\n\nit actually changed the assertion condition after the second time \nfunction call of FreePageBtreeSearch.\n             FreePageBtreeSearch(fpm, first_page, &result);\n\n             /*\n              * The act of allocating pages for use in constructing our \nbtree\n              * should never cause any page to become more full, so the new\n              * split depth should be no greater than the old one, and \nperhaps\n              * less if we fortuitously allocated a chunk that freed up \na slot\n              * on the page we need to update.\n              */\n             Assert(result.split_pages <= fpm->btree_recycle_count);\n\nShould we consider adding some test cases to make sure this assertion \nwill still function properly?\n\n>\n> regards.\n>\n-- \nDavid\n\nSoftware Engineer\nHighgo Software Inc. (Canada)\nwww.highgo.ca\n\n\n", "msg_date": "Fri, 1 Oct 2021 12:23:42 -0700", "msg_from": "David Zhang <david.zhang@highgo.ca>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "Em sex., 1 de out. 
de 2021 às 16:24, David Zhang <david.zhang@highgo.ca>\nescreveu:\n\n> On 2021-08-18 1:29 a.m., Kyotaro Horiguchi wrote:\n> > At Tue, 17 Aug 2021 17:04:44 +0900, Michael Paquier <michael@paquier.xyz>\n> wrote in\n> >> On Fri, Jul 02, 2021 at 06:22:56PM -0300, Ranier Vilela wrote:\n> >>> Em qui., 1 de jul. de 2021 às 17:20, Mahendra Singh Thalor <\n> >>> mahi6run@gmail.com> escreveu:\n> >>>> Please can we try to hit this rare condition by any test case. If you\n> have\n> >>>> any test cases, please share.\n> >> Yeah, this needs to be proved. Are you sure that this change is\n> >> actually right? The bottom of FreePageManagerPutInternal() has\n> >> assumptions that a page may not be found during a btree search, with\n> >> an index value used.\n> > By a quick look, FreePageBtreeSearch is called only from\n> > FreePageManagerPutInternal at three points. The first one assumes that\n> > result.found == true, at the rest points are passed only when\n> > fpm->btree_depth > 0, i.e, fpm->btree_root is non-NULL.\n> >\n> > In short FreePageBtreeSeach is never called when fpm->btree_root is\n> > NULL. 
I don't think we need to fill-in other members since the\n> > contract of the function looks fine.\n> >\n> > It might be simpler to turn 'if (btp == NULL)' to an assertion.\n> After added the initialization of split_pages in patch\n> fix_unitialized_var_index_freepage-v1.patch,\n>\n> + result->split_pages = 0;\n>\n> it actually changed the assertion condition after the second time\n> function call of FreePageBtreeSearch.\n> FreePageBtreeSearch(fpm, first_page, &result);\n>\n> /*\n> * The act of allocating pages for use in constructing our\n> btree\n> * should never cause any page to become more full, so the new\n> * split depth should be no greater than the old one, and\n> perhaps\n> * less if we fortuitously allocated a chunk that freed up\n> a slot\n> * on the page we need to update.\n> */\n> Assert(result.split_pages <= fpm->btree_recycle_count);\n>\nFor me the assertion remains valid and usable.\n\nregards,\nRanier Vilela\n\nEm sex., 1 de out. de 2021 às 16:24, David Zhang <david.zhang@highgo.ca> escreveu:On 2021-08-18 1:29 a.m., Kyotaro Horiguchi wrote:\n> At Tue, 17 Aug 2021 17:04:44 +0900, Michael Paquier <michael@paquier.xyz> wrote in\n>> On Fri, Jul 02, 2021 at 06:22:56PM -0300, Ranier Vilela wrote:\n>>> Em qui., 1 de jul. de 2021 às 17:20, Mahendra Singh Thalor <\n>>> mahi6run@gmail.com> escreveu:\n>>>> Please can we try to hit this rare condition by any test case. If you have\n>>>> any test cases, please share.\n>> Yeah, this needs to be proved.  Are you sure that this change is\n>> actually right?  The bottom of FreePageManagerPutInternal() has\n>> assumptions that a page may not be found during a btree search, with\n>> an index value used.\n> By a quick look, FreePageBtreeSearch is called only from\n> FreePageManagerPutInternal at three points. 
The first one assumes that\n> result.found == true, at the rest points are passed only when\n> fpm->btree_depth > 0, i.e, fpm->btree_root is non-NULL.\n>\n> In short FreePageBtreeSeach is never called when fpm->btree_root is\n> NULL.  I don't think we need to fill-in other members since the\n> contract of the function looks fine.\n>\n> It might be simpler to turn 'if (btp == NULL)' to an assertion.\nAfter added the initialization of split_pages in patch \nfix_unitialized_var_index_freepage-v1.patch,\n\n+        result->split_pages = 0;\n\nit actually changed the assertion condition after the second time \nfunction call of FreePageBtreeSearch.\n             FreePageBtreeSearch(fpm, first_page, &result);\n\n             /*\n              * The act of allocating pages for use in constructing our \nbtree\n              * should never cause any page to become more full, so the new\n              * split depth should be no greater than the old one, and \nperhaps\n              * less if we fortuitously allocated a chunk that freed up \na slot\n              * on the page we need to update.\n              */\n             Assert(result.split_pages <= fpm->btree_recycle_count);For me the assertion remains valid and usable.regards,Ranier Vilela", "msg_date": "Fri, 1 Oct 2021 17:03:04 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "On Fri, Oct 01, 2021 at 05:03:04PM -0300, Ranier Vilela wrote:\n> For me the assertion remains valid and usable.\n\nWell, I was looking at this thread again, and I still don't see what\nwe benefit from this change. One thing that could also be done is to\ninitialize \"result\" at {0} at the top of FreePageManagerGetInternal()\nand FreePageManagerPutInternal(), but that's in the same category as\nthe other suggestions. 
I'll go drop the patch if there are no\nobjections.\n--\nMichael", "msg_date": "Thu, 27 Jan 2022 16:32:28 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "On Thu, Jan 27, 2022 at 6:32 PM Michael Paquier <michael@paquier.xyz> wrote:\n>\n> On Fri, Oct 01, 2021 at 05:03:04PM -0300, Ranier Vilela wrote:\n> > For me the assertion remains valid and usable.\n>\n> Well, I was looking at this thread again, and I still don't see what\n> we benefit from this change. One thing that could also be done is to\n> initialize \"result\" at {0} at the top of FreePageManagerGetInternal()\n> and FreePageManagerPutInternal(), but that's in the same category as\n> the other suggestions. I'll go drop the patch if there are no\n> objections.\n\nWhy not, at least, just add \"Assert(result.page != NULL);\" after the\n\"Assert(!result.found);\" in FreePageManagerPutInternal()?\nThe following code block in FreePageBtreeSearch() - which lacks those\ninitializations - should never be invoked in this case, and the added\nAssert will make this more obvious.\n\nif (btp == NULL)\n{\n result->page = NULL;\n result->found = false;\n return;\n}\n\nRegards,\nGreg Nancarrow\nFujitsu Australia\n\n\n", "msg_date": "Thu, 27 Jan 2022 23:32:52 +1100", "msg_from": "Greg Nancarrow <gregn4422@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "On Thu, Jan 27, 2022 at 7:33 AM Greg Nancarrow <gregn4422@gmail.com> wrote:\n> Why not, at least, just add \"Assert(result.page != NULL);\" after the\n> \"Assert(!result.found);\" in FreePageManagerPutInternal()?\n> The following code block in FreePageBtreeSearch() - which lacks those\n> initializations - should never be invoked in this case, and the added\n> Assert will make this more obvious.\n>\n> if (btp == NULL)\n> {\n> 
result->page = NULL;\n> result->found = false;\n> return;\n> }\n\nThis patch is now in its fourth CommitFest, which is really a pretty\nhigh number for a patch that has no demonstrated benefit. I'm marking\nit rejected.\n\nIf you or someone else wants to submit a carefully-considered patch to\nadd meaningful assertions to this file in places where it would\nclarify the intent of the code, please feel free to do that. But the\npatch as presented doesn't do that. It simply initializes some\nstructure members to arbitrary values that probably won't produce\nsensible results instead of leaving them uninitialized which probably\nwon't lead to sensible results either. It's been argued that this\ncould prevent future bugs, but I find that dubious. This code isn't\nlikely to be heavily modified in the future - it's a low-level\nsubsystem that has thus far shown no evidence of needing major\nsurgery. If surgery does happen in the future, I would argue that this\nchange could easily *mask* bugs, because if somebody tries to apply\nvalgrind to this code the useless initializations will just cover up\nwhat valgrind would otherwise detect as an access to uninitialized\nmemory.\n\nPlease let's move on. There are almost 300 patches in this CommitFest\nand many of them add nifty features or fix demonstrable bugs. 
This\ndoes neither.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 14 Mar 2022 15:15:18 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" }, { "msg_contents": "On Mon, Mar 14, 2022 at 12:15 PM Robert Haas <robertmhaas@gmail.com> wrote:\n> If surgery does happen in the future, I would argue that this\n> change could easily *mask* bugs, because if somebody tries to apply\n> valgrind to this code the useless initializations will just cover up\n> what valgrind would otherwise detect as an access to uninitialized\n> memory.\n\nI agree. I have found it useful to VALGRIND_MAKE_MEM_DEFINED() a\nbuffer that would otherwise be considered initialized by Valgrind --\nsometimes it's useful to make Valgrind understand that an area of\nmemory is garbage for all practical purposes. In other words,\nsometimes it's useful to go out of your way to work around the problem\nof meaningless initialization masking bugs (bugs that could otherwise\nbe detected by Valgrind).\n\nOf course it's also possible that initializing memory to some\nnominally safe value (e.g. using palloc0() rather than palloc()) makes\nsense as a defensive measure. It depends on the specific code, of\ncourse.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Mon, 14 Mar 2022 12:30:37 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: Fix uninitialized variable access\n (src/backend/utils/mmgr/freepage.c)" } ]
[ { "msg_contents": "I've recently been working with a PostgreSQL database where we saw\nReorderBufferToastReplace() fail due to a reltoastrelid value of 0.  In\nthe original thread, Amit pointed out a connection to the speculative\ninsert decoding memory leak bug.  Bertrand built a test case confirming\nthat a speculative abort record followed by an insert on another table\nwould in fact produce the error message from ReorderBufferToastReplace()\nthat we had seen, and accordingly we're pretty sure that was the root\ncause in this particular database.\n\nhttps://www.postgresql.org/message-id/5f9a118e-86c5-d4f1-b584-199a33757cd4%40amazon.com\n\nNonetheless, in the process of troubleshooting it occurred to me that\nthis error message would be more useful in general if it included the\nbase relation OID in the error message. Amit suggested a separate thread\n- so here we are.  :)\n\nhttps://www.postgresql.org/message-id/CAA4eK1%2BdeBBnMVPxfLuv1Aa7tuh-7e3FvnSvWTaCy4-_HPcBLg%40mail.gmail.com\n\nAnyone have thoughts?  
Would I need a commitfest entry to propose a\ntwo-line tweak like this?\n\n-Jeremy\n\n\n\ndiff --git a/src/backend/replication/logical/reorderbuffer.c\nb/src/backend/replication/logical/reorderbuffer.c\nindex 2d9e1279bb..b90603b051 100644\n--- a/src/backend/replication/logical/reorderbuffer.c\n+++ b/src/backend/replication/logical/reorderbuffer.c\n@@ -4598,8 +4598,8 @@ ReorderBufferToastReplace(ReorderBuffer *rb,\nReorderBufferTXN *txn,\n\n        toast_rel = RelationIdGetRelation(relation->rd_rel->reltoastrelid);\n        if (!RelationIsValid(toast_rel))\n-               elog(ERROR, \"could not open relation with OID %u\",\n-                        relation->rd_rel->reltoastrelid);\n+               elog(ERROR, \"could not open toast relation with OID %u\n(base relation with OID %u)\",\n+                        relation->rd_rel->reltoastrelid,\nrelation->rd_rel->oid);\n\n        toast_desc = RelationGetDescr(toast_rel);\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services\n\n\n\n\n\n\n\n I've recently been working with a PostgreSQL database where we saw\n ReorderBufferToastReplace() fail due to a reltoastrelid value of 0. \n In the original thread, Amit pointed out a connection to the\n speculative insert decoding memory leak bug.  Bertrand built a test\n case confirming that a speculative abort record followed by an\n insert on another table would in fact produce the error message from\n ReorderBufferToastReplace() that we had seen, and accordingly we're\n pretty sure that was the root cause in this particular database.\n\nhttps://www.postgresql.org/message-id/5f9a118e-86c5-d4f1-b584-199a33757cd4%40amazon.com\n\n Nonetheless, in the process of troubleshooting it occurred to me\n that this error message would be more useful in general if it\n included the base relation OID in the error message. Amit suggested\n a separate thread - so here we are.  
:)\n\nhttps://www.postgresql.org/message-id/CAA4eK1%2BdeBBnMVPxfLuv1Aa7tuh-7e3FvnSvWTaCy4-_HPcBLg%40mail.gmail.com\n\n Anyone have thoughts?  Would I need a commitfest entry to propose a\n two-line tweak like this?\n\n -Jeremy\n\n\n\ndiff --git\n a/src/backend/replication/logical/reorderbuffer.c\n b/src/backend/replication/logical/reorderbuffer.c\n index 2d9e1279bb..b90603b051 100644\n --- a/src/backend/replication/logical/reorderbuffer.c\n +++ b/src/backend/replication/logical/reorderbuffer.c\n @@ -4598,8 +4598,8 @@ ReorderBufferToastReplace(ReorderBuffer *rb,\n ReorderBufferTXN *txn,\n\n         toast_rel =\n RelationIdGetRelation(relation->rd_rel->reltoastrelid);\n         if (!RelationIsValid(toast_rel))\n -               elog(ERROR, \"could not open relation with OID %u\",\n -                        relation->rd_rel->reltoastrelid);\n +               elog(ERROR, \"could not open toast relation with\n OID %u (base relation with OID %u)\",\n +                        relation->rd_rel->reltoastrelid,\n relation->rd_rel->oid);\n\n         toast_desc = RelationGetDescr(toast_rel);\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services", "msg_date": "Thu, 1 Jul 2021 18:25:58 -0700", "msg_from": "Jeremy Schneider <schnjere@amazon.com>", "msg_from_op": true, "msg_subject": "relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "Jeremy Schneider <schnjere@amazon.com> writes:\n> Nonetheless, in the process of troubleshooting it occurred to me that\n> this error message would be more useful in general if it included the\n> base relation OID in the error message. Amit suggested a separate thread\n> - so here we are.  :)\n> Anyone have thoughts?  Would I need a commitfest entry to propose a\n> two-line tweak like this?\n\nProbably not, unless it slips through the cracks. 
But I wonder why\nprint the parent's OID, when we have access to its name.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 01 Jul 2021 21:56:40 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "\r\n> On Jul 1, 2021, at 18:57, Tom Lane <tgl@sss.pgh.pa.us> wrote:\r\n> \r\n> But I wonder why\r\n> print the parent's OID, when we have access to its name.\r\n\r\nSeems like a few people do schema-based multi-tenancy with similarly named relations in different namespaces, so I’d have a preference for OID over an unqualified relation name. Also the error message shows the OID for the toast relation so this is consistent.\r\n\r\nRelation name could work too though, especially if the schema was included\r\n\r\n-Jeremy\r\n\r\n\r\nSent from my TI-83\r\n\r\n", "msg_date": "Fri, 2 Jul 2021 02:12:01 +0000", "msg_from": "\"Schneider (AWS), Jeremy\" <schnjere@amazon.com>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "\"Schneider (AWS), Jeremy\" <schnjere@amazon.com> writes:\n>> On Jul 1, 2021, at 18:57, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> But I wonder why\n>> print the parent's OID, when we have access to its name.\n\n> Seems like a few people do schema-based multi-tenancy with similarly named relations in different namespaces, so I’d have a preference for OID over an unqualified relation name. Also the error message shows the OID for the toast relation so this is consistent.\n\nUm, well, the reason we're printing the OID for the toast rel is exactly\nthat we've not been able to resolve an associated relation, so the OID is\nthe *only* thing that we have. 
But we do have the parent rel's relcache\nentry at hand.\n\n> Relation name could work too though, especially if the schema was included\n\nI'm definitely -1 on trying to print the schema name, because that would\nrequire an additional catalog fetch. If we're in this situation at all,\ncatalog fetches are suspect; we could easily end up hitting an additional\nfailure that will probably print a *totally* uninformative message.\n\nI understand your point about similarly-named tables in different schemas,\nbut Postgres' error messages are uniformly unfriendly to that case, and\nI'm not seeing a good reason why this one needs to be different. On the\nother side, I think there are good reasons not to print an OID when we\ndon't have to: there are plenty of situations where you can't readily\ntrace the OID to anything at all. For example, if this error showed up\nin a buildfarm run, what do you think the odds would be of identifying\nwhich table the OID referred to? Maybe you could do it given knowledge\nthat the error could only be referencing one of a few specific tables,\nbut it'd still be questionable.\n\nSo I think the relation name is what to print here. That's generally\nwhat we do, and there's not much argument for this case to be different.\n\n(I'm not unsympathetic to the idea that printing schema names\nwould be helpful. Just that here is not where to start with\nthat. Maybe we could consider sucking in the schema name\nduring relcache entry build, and then print from that copy so we\ndon't need an additional catalog fetch under error conditions?)\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 01 Jul 2021 23:44:44 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On 7/1/21 20:44, Tom Lane wrote:\n> So I think the relation name is what to print here. 
That's generally\n> what we do, and there's not much argument for this case to be different.\n\nWorks for me.  Anything in the error message is quickly and easily\nvisible to users, without attaching debuggers or decoding WAL. A lot of\nPostgreSQL users won't know how to do the advanced troubleshooting, but\nknowing the table name might give a clue for better identifying SQL that\nmight have been related, and could help produce better bug reports to\nthe mailing lists in the future.\n\nRelated to that, I do think it could be useful to backpatch this - we\nknow that users are hitting this error on older versions.  Even though\nit's most likely that the speculative insert decoding memory leak bug\nwill address the problem, this update still seems low-risk and useful to\nme just in the off-chance that someone hits the error again.\n\n> (I'm not unsympathetic to the idea that printing schema names\n> would be helpful. Just that here is not where to start with\n> that. Maybe we could consider sucking in the schema name\n> during relcache entry build, and then print from that copy so we\n> don't need an additional catalog fetch under error conditions?)\n\nAgreed not to tackle that in this thread; I wouldn't want to slow down\nthe simple tweak that could be useful.\n\n-Jeremy\n\n\ndiff --git a/src/backend/replication/logical/reorderbuffer.c\nb/src/backend/replication/logical/reorderbuffer.c\nindex b8c5e2a44e..6a3a35d05d 100644\n--- a/src/backend/replication/logical/reorderbuffer.c\n+++ b/src/backend/replication/logical/reorderbuffer.c\n@@ -4625,8 +4625,8 @@ ReorderBufferToastReplace(ReorderBuffer *rb,\nReorderBufferTXN *txn,\n\n        toast_rel = RelationIdGetRelation(relation->rd_rel->reltoastrelid);\n        if (!RelationIsValid(toast_rel))\n-               elog(ERROR, \"could not open relation with OID %u\",\n-                        relation->rd_rel->reltoastrelid);\n+               elog(ERROR, \"could not open toast relation with OID %u\n(base relation 
\\\"%s\\\")\",\n+                        relation->rd_rel->reltoastrelid,\nrelation->rd_rel->relname);\n\n        toast_desc = RelationGetDescr(toast_rel);\n\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services\n\n\n\n\n\n\n\nOn 7/1/21 20:44, Tom Lane wrote:\n\n\nSo I think the relation name is what to print here. That's generally\nwhat we do, and there's not much argument for this case to be different.\n\n\n\n Works for me.  Anything in the error message is quickly and easily\n visible to users, without attaching debuggers or decoding WAL. A lot\n of PostgreSQL users won't know how to do the advanced\n troubleshooting, but knowing the table name might give a clue for\n better identifying SQL that might have been related, and could help\n produce better bug reports to the mailing lists in the future.\n\n Related to that, I do think it could be useful to backpatch this -\n we know that users are hitting this error on older versions.  Even\n though it's most likely that the speculative insert decoding memory\n leak bug will address the problem, this update still seems low-risk\n and useful to me just in the off-chance that someone hits the error\n again.\n\n\n(I'm not unsympathetic to the idea that printing schema names\nwould be helpful. Just that here is not where to start with\nthat. 
Maybe we could consider sucking in the schema name\nduring relcache entry build, and then print from that copy so we\ndon't need an additional catalog fetch under error conditions?)\n\n\n\n Agreed not to tackle that in this thread; I wouldn't want to slow\n down the simple tweak that could be useful.\n\n -Jeremy\n\n\ndiff --git\n a/src/backend/replication/logical/reorderbuffer.c\n b/src/backend/replication/logical/reorderbuffer.c\n index b8c5e2a44e..6a3a35d05d 100644\n --- a/src/backend/replication/logical/reorderbuffer.c\n +++ b/src/backend/replication/logical/reorderbuffer.c\n @@ -4625,8 +4625,8 @@ ReorderBufferToastReplace(ReorderBuffer *rb,\n ReorderBufferTXN *txn,\n\n         toast_rel =\n RelationIdGetRelation(relation->rd_rel->reltoastrelid);\n         if (!RelationIsValid(toast_rel))\n -               elog(ERROR, \"could not open relation with OID %u\",\n -                        relation->rd_rel->reltoastrelid);\n +               elog(ERROR, \"could not open toast relation with\n OID %u (base relation \\\"%s\\\")\",\n +                        relation->rd_rel->reltoastrelid,\n relation->rd_rel->relname);\n\n         toast_desc = RelationGetDescr(toast_rel);\n\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services", "msg_date": "Fri, 2 Jul 2021 18:57:37 -0700", "msg_from": "Jeremy Schneider <schnjere@amazon.com>", "msg_from_op": true, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On 7/2/21 18:57, Jeremy Schneider wrote:\n> On 7/1/21 20:44, Tom Lane wrote:\n>> So I think the relation name is what to print here. That's generally\n>> what we do, and there's not much argument for this case to be different.\n>\n> Works for me.  Anything in the error message is quickly and easily\n> visible to users, without attaching debuggers or decoding WAL. 
A lot\n> of PostgreSQL users won't know how to do the advanced troubleshooting,\n> but knowing the table name might give a clue for better identifying\n> SQL that might have been related, and could help produce better bug\n> reports to the mailing lists in the future.\n>\n> Related to that, I do think it could be useful to backpatch this - we\n> know that users are hitting this error on older versions.  Even though\n> it's most likely that the speculative insert decoding memory leak bug\n> will address the problem, this update still seems low-risk and useful\n> to me just in the off-chance that someone hits the error again.\n\nI have a few new thoughts, related to some new recent developments here.\n\nBack in the first thread, Amit had asked if this was reproducible.  The\nuser we know of who recently ran into this error hasn't been able to\ncome up with a series of steps that reliably reproduces it, but they did\nencounter the error again last week on a test system after they left a\ntest workload running for some time. (This apparently took a bit of\neffort to do and isn't easily repeatable.)\n\nWe investigated that system and we verified specifically for the\nincident last week that it definitely did *not* hit the speculative\ninsert abort bug. We attached a debugger and checked the\nXLH_DELETE_IS_SUPER flag just before the decoding process error-ed out,\nand the flag was not set.\n\nWe looked a bit further at that system and while I'm not sure everything\nis solved, it did help Bertrand find one clear bug with an obvious\nreproduction. We have proposed that fix for the next commitfest:\n\nhttps://commitfest.postgresql.org/34/3241/\n\nThe process of trying to understand this recent incident has given me\nsome new insight about what information would be helpful up front in\nthis error message for faster resolution.\n\nFirst off, and most importantly, the current WAL record we're processing\nwhen the error is encountered. 
I wonder if it could easily print the LSN?\n\nSecondly, the transaction ID. In the specific bug Bertrand found, the\nproblem is actually not with the actual WAL record that's being\nprocessed - but rather because previous WAL records in the same\ntransaction left the decoder process in a state where the current WAL\nrecord [a commit] generated an error.  So it's the entire transaction\nthat needs to be examined to reproduce the error.  (Andres actually\npointed this out on the original thread back in December 2019.)  I\nrealize that once you know the LSN you can easily get the XID with\npg_waldump, but personally I'd just as soon include the XID in the error\nmessage since I think it will usually be a first step for debugging any\nproblems with WAL decoding. The I can go straight to filtering that XID\non my first pg_waldump run.\n\nThoughts?\n\n-Jeremy\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services\n\n\n\n\n\n\n\nOn 7/2/21 18:57, Jeremy Schneider\n wrote:\n\n\n\nOn 7/1/21 20:44, Tom Lane wrote:\n\n\nSo I think the relation name is what to print here. That's generally\nwhat we do, and there's not much argument for this case to be different.\n\n\n\n Works for me.  Anything in the error message is quickly and easily\n visible to users, without attaching debuggers or decoding WAL. A\n lot of PostgreSQL users won't know how to do the advanced\n troubleshooting, but knowing the table name might give a clue for\n better identifying SQL that might have been related, and could\n help produce better bug reports to the mailing lists in the\n future.\n\n Related to that, I do think it could be useful to backpatch this -\n we know that users are hitting this error on older versions.  
Even\n though it's most likely that the speculative insert decoding\n memory leak bug will address the problem, this update still seems\n low-risk and useful to me just in the off-chance that someone hits\n the error again.\n\n\n I have a few new thoughts, related to some new recent developments\n here.\n\n Back in the first thread, Amit had asked if this was reproducible. \n The user we know of who recently ran into this error hasn't been\n able to come up with a series of steps that reliably reproduces it,\n but they did encounter the error again last week on a test system\n after they left a test workload running for some time. (This\n apparently took a bit of effort to do and isn't easily repeatable.)\n\n We investigated that system and we verified specifically for the\n incident last week that it definitely did *not* hit the speculative\n insert abort bug. We attached a debugger and checked the XLH_DELETE_IS_SUPER\n flag just before the decoding process error-ed\n out, and the flag was not set.\n\n We looked a bit further at that system and while I'm not sure\n everything is solved, it did help Bertrand find one clear bug with\n an obvious reproduction. We have proposed that fix for the next\n commitfest:\n\nhttps://commitfest.postgresql.org/34/3241/\n\n The process of trying to understand this recent incident has given\n me some new insight about what information would be helpful up front\n in this error message for faster resolution.\n\n First off, and most importantly, the current WAL record we're\n processing when the error is encountered. I wonder if it could\n easily print the LSN?\n\n Secondly, the transaction ID. In the specific bug Bertrand found,\n the problem is actually not with the actual WAL record that's being\n processed - but rather because previous WAL records in the same\n transaction left the decoder process in a state where the current\n WAL record [a commit] generated an error.  
So it's the entire\n transaction that needs to be examined to reproduce the error. \n (Andres actually pointed this out on the original thread back in\n December 2019.)  I realize that once you know the LSN you can easily\n get the XID with pg_waldump, but personally I'd just as soon include\n the XID in the error message since I think it will usually be a\n first step for debugging any problems with WAL decoding. The I can\n go straight to filtering that XID on my first pg_waldump run.\n\n Thoughts?\n\n -Jeremy\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services", "msg_date": "Wed, 14 Jul 2021 17:44:34 -0700", "msg_from": "Jeremy Schneider <schnjere@amazon.com>", "msg_from_op": true, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On Thu, Jul 15, 2021 at 6:14 AM Jeremy Schneider <schnjere@amazon.com> wrote:\n>\n> On 7/2/21 18:57, Jeremy Schneider wrote:\n>\n> The process of trying to understand this recent incident has given me some new insight about what information would be helpful up front in this error message for faster resolution.\n>\n> First off, and most importantly, the current WAL record we're processing when the error is encountered. I wonder if it could easily print the LSN?\n>\n> Secondly, the transaction ID. In the specific bug Bertrand found, the problem is actually not with the actual WAL record that's being processed - but rather because previous WAL records in the same transaction left the decoder process in a state where the current WAL record [a commit] generated an error. So it's the entire transaction that needs to be examined to reproduce the error. (Andres actually pointed this out on the original thread back in December 2019.) I realize that once you know the LSN you can easily get the XID with pg_waldump, but personally I'd just as soon include the XID in the error message since I think it will usually be a first step for debugging any problems with WAL decoding. 
The I can go straight to filtering that XID on my first pg_waldump run.\n>\n\nI don't think it is a bad idea to print additional information as you\nare suggesting but why only for this error? It could be useful to\ninvestigate any other error we get during decoding. I think normally\nwe add such additional information via error_context. We have recently\nadded/enhanced it for apply-workers, see commit [1].\n\nI think here we should just print the relation name in the error\nmessage you pointed out and then work on adding additional information\nvia error context as a separate patch. What do you think?\n\n[1] - https://git.postgresql.org/gitweb/?p=postgresql.git;a=commitdiff;h=abc0910e2e0adfc5a17e035465ee31242e32c4fc\n\n\n--\nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Fri, 17 Sep 2021 10:53:09 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On Fri, Sep 17, 2021 at 10:53 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> I don't think it is a bad idea to print additional information as you\n> are suggesting but why only for this error? It could be useful to\n> investigate any other error we get during decoding. I think normally\n> we add such additional information via error_context. We have recently\n> added/enhanced it for apply-workers, see commit [1].\n>\n> I think here we should just print the relation name in the error\n> message you pointed out and then work on adding additional information\n> via error context as a separate patch. What do you think?\n>\n\nAttached please find the patch which just modifies the current error\nmessage as proposed by you. 
I am planning to commit it in a day or two\nunless there are comments or any other suggestions.\n\n-- \nWith Regards,\nAmit Kapila.", "msg_date": "Tue, 21 Sep 2021 10:44:37 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On 9/20/21 22:14, Amit Kapila wrote:\n> On Fri, Sep 17, 2021 at 10:53 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>>\n>> I don't think it is a bad idea to print additional information as you\n>> are suggesting but why only for this error? It could be useful to\n>> investigate any other error we get during decoding. I think normally\n>> we add such additional information via error_context. We have recently\n>> added/enhanced it for apply-workers, see commit [1].\n>>\n>> I think here we should just print the relation name in the error\n>> message you pointed out and then work on adding additional information\n>> via error context as a separate patch. What do you think?\n> \n> Attached please find the patch which just modifies the current error\n> message as proposed by you. I am planning to commit it in a day or two\n> unless there are comments or any other suggestions.\n\nLooks good to me. I see that I hadn't used the macro for getting the\nrelation name, thanks for fixing that!\n\n-Jeremy\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services\n\n\n", "msg_date": "Tue, 21 Sep 2021 13:47:54 -0700", "msg_from": "Jeremy Schneider <schnjere@amazon.com>", "msg_from_op": true, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On Wed, Sep 22, 2021 at 2:17 AM Jeremy Schneider <schnjere@amazon.com> wrote:\n>\n> On 9/20/21 22:14, Amit Kapila wrote:\n> > On Fri, Sep 17, 2021 at 10:53 AM Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >>\n> >> I don't think it is a bad idea to print additional information as you\n> >> are suggesting but why only for this error? 
It could be useful to\n> >> investigate any other error we get during decoding. I think normally\n> >> we add such additional information via error_context. We have recently\n> >> added/enhanced it for apply-workers, see commit [1].\n> >>\n> >> I think here we should just print the relation name in the error\n> >> message you pointed out and then work on adding additional information\n> >> via error context as a separate patch. What do you think?\n> >\n> > Attached please find the patch which just modifies the current error\n> > message as proposed by you. I am planning to commit it in a day or two\n> > unless there are comments or any other suggestions.\n>\n> Looks good to me. I see that I hadn't used the macro for getting the\n> relation name, thanks for fixing that!\n>\n\nPushed.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Wed, 22 Sep 2021 09:28:22 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On 9/21/21 20:58, Amit Kapila wrote:\n> On 9/20/21 22:14, Amit Kapila wrote:\n>>> Attached please find the patch which just modifies the current error\n>>> message as proposed by you. I am planning to commit it in a day or two\n>>> unless there are comments or any other suggestions.\n>>>\n> Pushed.\n\nAny chance of back-patching this? That would get it much sooner into the\nparticular PG system whose encounter with a logical replication bug\ninspired it.\n\n-Jeremy\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services\n\n\n\n\n\n\n\nOn 9/21/21 20:58, Amit Kapila wrote:\n\n\nOn 9/20/21 22:14, Amit Kapila wrote:\n\n\n\nAttached please find the patch which just modifies the current error\nmessage as proposed by you. I am planning to commit it in a day or two\nunless there are comments or any other suggestions.\n\n\n\n\n\nPushed.\n\n\n\n Any chance of back-patching this? 
That would get it much sooner into\n the particular PG system whose encounter with a logical replication\n bug inspired it.\n\n -Jeremy\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services", "msg_date": "Wed, 22 Sep 2021 14:36:53 -0700", "msg_from": "Jeremy Schneider <schnjere@amazon.com>", "msg_from_op": true, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On Thu, Sep 23, 2021 at 3:06 AM Jeremy Schneider <schnjere@amazon.com> wrote:\n>\n> On 9/21/21 20:58, Amit Kapila wrote:\n>\n> On 9/20/21 22:14, Amit Kapila wrote:\n>\n> Attached please find the patch which just modifies the current error\n> message as proposed by you. I am planning to commit it in a day or two\n> unless there are comments or any other suggestions.\n>\n> Pushed.\n>\n>\n> Any chance of back-patching this?\n>\n\nNormally, we don't back-patch code improvements unless they fix some\nbug or avoid future back-patch efforts. So, I am not inclined to\nback-patch this but if others also feel strongly about this then we\ncan consider it.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 23 Sep 2021 08:41:56 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On 9/22/21 20:11, Amit Kapila wrote:\n> \n> On Thu, Sep 23, 2021 at 3:06 AM Jeremy Schneider <schnjere@amazon.com> wrote:\n>>\n>> Any chance of back-patching this?\n> \n> Normally, we don't back-patch code improvements unless they fix some\n> bug or avoid future back-patch efforts. So, I am not inclined to\n> back-patch this but if others also feel strongly about this then we\n> can consider it.\n\nThe original thread about the logical replication bugs spawned a few\ndifferent threads and code changes. 
The other code changes coming out of\nthose threads were all back-patched, but I guess I can see arguments\nboth ways on this one.\n\nhttps://www.postgresql.org/message-id/flat/E1i74JE-00024V-9J%40gemulon.postgresql.org\n\nhttps://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=29b5905470285bf730f6fe7cc5ddb3513d0e6945\n\nhttps://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=df3640e5293dccbf964508babfc067282ea7a2fc\n\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services\n\n\n", "msg_date": "Thu, 23 Sep 2021 11:19:39 -0700", "msg_from": "Jeremy Schneider <schnjere@amazon.com>", "msg_from_op": true, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On 2021-Sep-23, Jeremy Schneider wrote:\n\n> On 9/22/21 20:11, Amit Kapila wrote:\n> > \n> > On Thu, Sep 23, 2021 at 3:06 AM Jeremy Schneider <schnjere@amazon.com> wrote:\n> >>\n> >> Any chance of back-patching this?\n> > \n> > Normally, we don't back-patch code improvements unless they fix some\n> > bug or avoid future back-patch efforts. So, I am not inclined to\n> > back-patch this but if others also feel strongly about this then we\n> > can consider it.\n> \n> The original thread about the logical replication bugs spawned a few\n> different threads and code changes. The other code changes coming out of\n> those threads were all back-patched, but I guess I can see arguments\n> both ways on this one.\n\nI think that for patches that are simple debugging aids we do\nbackpatch, with the intent to get them deployed in users' systems as\nsoon and as widely possible. 
I did that in this one, for example\n\nAuthor: Alvaro Herrera <alvherre@alvh.no-ip.org>\nBranch: master [961dd7565] 2021-08-30 16:29:12 -0400\nBranch: REL_14_STABLE [eae08e216] 2021-08-30 16:29:12 -0400\nBranch: REL_13_STABLE [6197d7b53] 2021-08-30 16:29:12 -0400\nBranch: REL_12_STABLE [fa8ae19be] 2021-08-30 16:29:12 -0400\nBranch: REL_11_STABLE [0105b7aaa] 2021-08-30 16:29:12 -0400\nBranch: REL_10_STABLE [02797ffa9] 2021-08-30 16:29:12 -0400\nBranch: REL9_6_STABLE [37e468252] 2021-08-30 16:29:12 -0400\n\n Report tuple address in data-corruption error message\n\n Most data-corruption reports mention the location of the problem, but\n this one failed to. Add it.\n \n Backpatch all the way back. In 12 and older, also assign the\n ERRCODE_DATA_CORRUPTED error code as was done in commit fd6ec93bf890 for\n 13 and later.\n \n Discussion: https://postgr.es/m/202108191637.oqyzrdtnheir@alvherre.pgsql\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"I'm impressed how quickly you are fixing this obscure issue. I came from \nMS SQL and it would be hard for me to put into words how much of a better job\nyou all are doing on [PostgreSQL].\"\n Steve Midgley, http://archives.postgresql.org/pgsql-sql/2008-08/msg00000.php\n\n\n", "msg_date": "Thu, 23 Sep 2021 15:25:13 -0300", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On 9/23/21, 11:26 AM, \"Alvaro Herrera\" <alvherre@alvh.no-ip.org> wrote:\r\n> On 2021-Sep-23, Jeremy Schneider wrote:\r\n>\r\n>> On 9/22/21 20:11, Amit Kapila wrote:\r\n>> >\r\n>> > On Thu, Sep 23, 2021 at 3:06 AM Jeremy Schneider <schnjere@amazon.com> wrote:\r\n>> >>\r\n>> >> Any chance of back-patching this?\r\n>> >\r\n>> > Normally, we don't back-patch code improvements unless they fix some\r\n>> > bug or avoid future back-patch efforts. 
So, I am not inclined to\r\n>> > back-patch this but if others also feel strongly about this then we\r\n>> > can consider it.\r\n>>\r\n>> The original thread about the logical replication bugs spawned a few\r\n>> different threads and code changes. The other code changes coming out of\r\n>> those threads were all back-patched, but I guess I can see arguments\r\n>> both ways on this one.\r\n>\r\n> I think that for patches that are simple debugging aids we do\r\n> backpatch, with the intent to get them deployed in users' systems as\r\n> soon and as widely possible. I did that in this one, for example\r\n\r\n+1 for back-patching\r\n\r\nNathan\r\n\r\n", "msg_date": "Thu, 14 Oct 2021 22:10:36 +0000", "msg_from": "\"Bossart, Nathan\" <bossartn@amazon.com>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On Fri, Oct 15, 2021 at 3:40 AM Bossart, Nathan <bossartn@amazon.com> wrote:\n>\n> On 9/23/21, 11:26 AM, \"Alvaro Herrera\" <alvherre@alvh.no-ip.org> wrote:\n> > On 2021-Sep-23, Jeremy Schneider wrote:\n> >\n> >> On 9/22/21 20:11, Amit Kapila wrote:\n> >> >\n> >> > On Thu, Sep 23, 2021 at 3:06 AM Jeremy Schneider <schnjere@amazon.com> wrote:\n> >> >>\n> >> >> Any chance of back-patching this?\n> >> >\n> >> > Normally, we don't back-patch code improvements unless they fix some\n> >> > bug or avoid future back-patch efforts. So, I am not inclined to\n> >> > back-patch this but if others also feel strongly about this then we\n> >> > can consider it.\n> >>\n> >> The original thread about the logical replication bugs spawned a few\n> >> different threads and code changes. The other code changes coming out of\n> >> those threads were all back-patched, but I guess I can see arguments\n> >> both ways on this one.\n> >\n> > I think that for patches that are simple debugging aids we do\n> > backpatch, with the intent to get them deployed in users' systems as\n> > soon and as widely possible. 
I did that in this one, for example\n>\n> +1 for back-patching\n>\n\nI can take care of backpatching this in the next few days unless there\nis any objection.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 18 Oct 2021 15:59:48 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On 10/18/21, 3:31 AM, \"Amit Kapila\" <amit.kapila16@gmail.com> wrote:\r\n> I can take care of backpatching this in the next few days unless there\r\n> is any objection.\r\n\r\nThanks!\r\n\r\nNathan\r\n\r\n", "msg_date": "Mon, 18 Oct 2021 23:30:36 +0000", "msg_from": "\"Bossart, Nathan\" <bossartn@amazon.com>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On Mon, Oct 18, 2021 at 3:59 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>\n> On Fri, Oct 15, 2021 at 3:40 AM Bossart, Nathan <bossartn@amazon.com> wrote:\n> >\n> > On 9/23/21, 11:26 AM, \"Alvaro Herrera\" <alvherre@alvh.no-ip.org> wrote:\n> > > On 2021-Sep-23, Jeremy Schneider wrote:\n> > >\n> > >> On 9/22/21 20:11, Amit Kapila wrote:\n> > >> >\n> > >> > On Thu, Sep 23, 2021 at 3:06 AM Jeremy Schneider <schnjere@amazon.com> wrote:\n> > >> >>\n> > >> >> Any chance of back-patching this?\n> > >> >\n> > >> > Normally, we don't back-patch code improvements unless they fix some\n> > >> > bug or avoid future back-patch efforts. So, I am not inclined to\n> > >> > back-patch this but if others also feel strongly about this then we\n> > >> > can consider it.\n> > >>\n> > >> The original thread about the logical replication bugs spawned a few\n> > >> different threads and code changes. 
The other code changes coming out of\n> > >> those threads were all back-patched, but I guess I can see arguments\n> > >> both ways on this one.\n> > >\n> > > I think that for patches that are simple debugging aids we do\n> > > backpatch, with the intent to get them deployed in users' systems as\n> > > soon and as widely possible. I did that in this one, for example\n> >\n> > +1 for back-patching\n> >\n>\n> I can take care of backpatching this in the next few days unless there\n> is any objection.\n>\n\nDone.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 21 Oct 2021 14:12:10 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" }, { "msg_contents": "On 10/21/21 01:42, Amit Kapila wrote:\n> On Mon, Oct 18, 2021 at 3:59 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n>> I can take care of backpatching this in the next few days unless\n>> there is any objection. \n> Done.\n\nThanks Amit\n\n\n-Jeremy\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services\n\n\n\n\n\n\nOn 10/21/21 01:42, Amit Kapila wrote:\n \n\nOn Mon, Oct 18, 2021 at 3:59 PM Amit Kapila <amit.kapila16@gmail.com> wrote:\n\nI can take care of backpatching this in\n the next few days unless there is any objection.\n \n\nDone.\n\n\n\n Thanks Amit\n\n\n -Jeremy\n\n-- \nJeremy Schneider\nDatabase Engineer\nAmazon Web Services", "msg_date": "Thu, 21 Oct 2021 09:12:14 -0700", "msg_from": "Jeremy Schneider <schnjere@amazon.com>", "msg_from_op": true, "msg_subject": "Re: relation OID in ReorderBufferToastReplace error message" } ]
[ { "msg_contents": "Hi,\n\nI found a strange behavior when there is an insert after renaming the\nschema. The test steps for the same are given below, Here after the\nschema is renamed, the renamed schema table data should not be sent,\nbut the data was being sent. I felt the schema invalidation was not\ncalled, attached a patch to handle the same. Thoughts?\n\nstep 1)\nCreate schema sch1;\nCreate table sch1.t1(c1 int);\nCREATE PUBLICATION mypub1 FOR all tables;\n\nStep 2)\nCREATE SCHEMA sch1;\nCREATE TABLE sch1.t1(c1 int);\nCREATE SCHEMA sch2;\nCREATE TABLE sch2.t1(c1 int);\nCREATE TABLE t1(c1 int);\nCREATE SUBSCRIPTION mysub1 CONNECTION 'host=localhost port=5432\ndbname=postgres' PUBLICATION mypub1;\n\nStep 3)\nbegin;\ninsert into sch1.t1 values(1);\nalter schema sch1 rename to sch2;\ncreate schema sch1;\ncreate table sch1.t1(c1 int);\ninsert into sch1.t1 values(2);\ninsert into sch2.t1 values(3);\ncommit;\n\nstep 4)\nselect * from sch1.t1; # In subscriber\nGot:\nc1\n----\n 1\n 2\n 3\n(3 rows)\n\nExpected:\nc1\n----\n 1\n 2\n(2 rows)\n\nRegards,\nVignesh", "msg_date": "Fri, 2 Jul 2021 11:11:32 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "On Fri, Jul 2, 2021 at 11:11 AM vignesh C <vignesh21@gmail.com> wrote:\n>\n> Hi,\n>\n> I found a strange behavior when there is an insert after renaming the\n> schema. The test steps for the same are given below, Here after the\n> schema is renamed, the renamed schema table data should not be sent,\n> but the data was being sent. I felt the schema invalidation was not\n> called, attached a patch to handle the same. 
Thoughts?\n>\n> step 1)\n> Create schema sch1;\n> Create table sch1.t1(c1 int);\n> CREATE PUBLICATION mypub1 FOR all tables;\n>\n> Step 2)\n> CREATE SCHEMA sch1;\n> CREATE TABLE sch1.t1(c1 int);\n> CREATE SCHEMA sch2;\n> CREATE TABLE sch2.t1(c1 int);\n> CREATE TABLE t1(c1 int);\n> CREATE SUBSCRIPTION mysub1 CONNECTION 'host=localhost port=5432\n> dbname=postgres' PUBLICATION mypub1;\n>\n> Step 3)\n> begin;\n> insert into sch1.t1 values(1);\n> alter schema sch1 rename to sch2;\n> create schema sch1;\n> create table sch1.t1(c1 int);\n> insert into sch1.t1 values(2);\n> insert into sch2.t1 values(3);\n> commit;\n>\n> step 4)\n> select * from sch1.t1; # In subscriber\n> Got:\n> c1\n> ----\n> 1\n> 2\n> 3\n> (3 rows)\n>\n> Expected:\n> c1\n> ----\n> 1\n> 2\n> (2 rows)\n\nYeah, this looks like a bug. I will look at the patch.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 2 Jul 2021 12:03:38 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "On Fri, Jul 2, 2021 at 12:03 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n>\n> Yeah, this looks like a bug. I will look at the patch.\n>\n\nWhile looking into this, I think the main cause of the problem is that\nschema rename does not invalidate the relation cache right? I also\ntried other cases e.g. 
if there is an open cursor and we rename the\nschema\n\nCREATE SCHEMA sch1;\nCREATE TABLE sch1.t1(c1 int);\ninsert into sch1.t1 values(1);\ninsert into sch1.t1 values(2);\ninsert into sch1.t1 values(3);\nBEGIN;\nDECLARE mycur CURSOR FOR SELECT * FROM sch1.t1;\nFETCH NEXT FROM mycur ;\n----------At this point rename sch1 to sch2 from another session------\nFETCH NEXT FROM mycur ;\nUPDATE sch2.t1 SET c1 = 20 WHERE CURRENT OF mycur;\nselect * from sch2.t1 ;\n\nSo even after the schema rename the cursor is able to fetch and its\nalso able to update on the same table in the new schema, ideally using\nCURRENT OF CUR, you can update the same table for which you have\ndeclared the cursor. I am giving this example because this behavior\nalso looks somewhat similar.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Sat, 3 Jul 2021 11:23:06 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "On Sat, Jul 3, 2021 at 11:23 AM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n>\n> On Fri, Jul 2, 2021 at 12:03 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n> >\n> > Yeah, this looks like a bug. I will look at the patch.\n> >\n>\n> While looking into this, I think the main cause of the problem is that\n> schema rename does not invalidate the relation cache right? I also\n> tried other cases e.g. 
if there is an open cursor and we rename the\n> schema\n>\n> CREATE SCHEMA sch1;\n> CREATE TABLE sch1.t1(c1 int);\n> insert into sch1.t1 values(1);\n> insert into sch1.t1 values(2);\n> insert into sch1.t1 values(3);\n> BEGIN;\n> DECLARE mycur CURSOR FOR SELECT * FROM sch1.t1;\n> FETCH NEXT FROM mycur ;\n> ----------At this point rename sch1 to sch2 from another session------\n> FETCH NEXT FROM mycur ;\n> UPDATE sch2.t1 SET c1 = 20 WHERE CURRENT OF mycur;\n> select * from sch2.t1 ;\n>\n> So even after the schema rename the cursor is able to fetch and its\n> also able to update on the same table in the new schema, ideally using\n> CURRENT OF CUR, you can update the same table for which you have\n> declared the cursor. I am giving this example because this behavior\n> also looks somewhat similar.\n\nIt works in this case because it uses the relation id for performing\nthe next fetch and the relation id does not get changed after renaming\nthe schema. Also since it holds a lock on the relation, alter/drop\noperations will not be allowed. I felt this behavior might be ok. But\nthe original scenario reported is an issue because it replicates the\ndata of both the original table and the renamed schema's table.\n\nRegards,\nVignesh\n\n\n", "msg_date": "Fri, 16 Jul 2021 22:51:23 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "On Fri, Jul 16, 2021 at 10:51 PM vignesh C <vignesh21@gmail.com> wrote:\n>\n> On Sat, Jul 3, 2021 at 11:23 AM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n> >\n> > On Fri, Jul 2, 2021 at 12:03 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n> > >\n> > > Yeah, this looks like a bug. I will look at the patch.\n> > >\n> >\n> > While looking into this, I think the main cause of the problem is that\n> > schema rename does not invalidate the relation cache right? I also\n> > tried other cases e.g. 
if there is an open cursor and we rename the\n> > schema\n> >\n> > CREATE SCHEMA sch1;\n> > CREATE TABLE sch1.t1(c1 int);\n> > insert into sch1.t1 values(1);\n> > insert into sch1.t1 values(2);\n> > insert into sch1.t1 values(3);\n> > BEGIN;\n> > DECLARE mycur CURSOR FOR SELECT * FROM sch1.t1;\n> > FETCH NEXT FROM mycur ;\n> > ----------At this point rename sch1 to sch2 from another session------\n> > FETCH NEXT FROM mycur ;\n> > UPDATE sch2.t1 SET c1 = 20 WHERE CURRENT OF mycur;\n> > select * from sch2.t1 ;\n> >\n> > So even after the schema rename the cursor is able to fetch and its\n> > also able to update on the same table in the new schema, ideally using\n> > CURRENT OF CUR, you can update the same table for which you have\n> > declared the cursor. I am giving this example because this behavior\n> > also looks somewhat similar.\n>\n> It works in this case because it uses the relation id for performing\n> the next fetch and the relation id does not get changed after renaming\n> the schema. Also since it holds a lock on the relation, alter/drop\n> operations will not be allowed. I felt this behavior might be ok. 
But\n> the original scenario reported is an issue because it replicates the\n> data of both the original table and the renamed schema's table.\n\nThe previous patch was failing because of the recent test changes made\nby commit 201a76183e2 which unified new and get_new_node, attached\npatch has the changes to handle the changes accordingly.\n\nRegards,\nVignesh", "msg_date": "Thu, 26 Aug 2021 21:00:39 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "On Thu, Aug 26, 2021 at 09:00:39PM +0530, vignesh C wrote:\n> The previous patch was failing because of the recent test changes made\n> by commit 201a76183e2 which unified new and get_new_node, attached\n> patch has the changes to handle the changes accordingly.\n\nPlease note that the CF app is complaining about this patch, so a\nrebase is required. I have moved it to next CF, waiting on author,\nfor now.\n--\nMichael", "msg_date": "Fri, 3 Dec 2021 16:43:20 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Logical replication - schema change not invalidating the\n relation cache" }, { "msg_contents": "On Fri, Dec 3, 2021 at 1:13 PM Michael Paquier <michael@paquier.xyz> wrote:\n>\n> On Thu, Aug 26, 2021 at 09:00:39PM +0530, vignesh C wrote:\n> > The previous patch was failing because of the recent test changes made\n> > by commit 201a76183e2 which unified new and get_new_node, attached\n> > patch has the changes to handle the changes accordingly.\n>\n> Please note that the CF app is complaining about this patch, so a\n> rebase is required. 
I have moved it to next CF, waiting on author,\n> for now.\n\nThanks for letting me know, I have rebased it on top of HEAD, the\nattached v2 version has the rebased changes.\n\nRegards,\nVignesh", "msg_date": "Fri, 3 Dec 2021 15:21:48 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "On Fri, Dec 3, 2021 at 3:21 PM vignesh C <vignesh21@gmail.com> wrote:\n>\n> On Fri, Dec 3, 2021 at 1:13 PM Michael Paquier <michael@paquier.xyz> wrote:\n> >\n> > On Thu, Aug 26, 2021 at 09:00:39PM +0530, vignesh C wrote:\n> > > The previous patch was failing because of the recent test changes made\n> > > by commit 201a76183e2 which unified new and get_new_node, attached\n> > > patch has the changes to handle the changes accordingly.\n> >\n> > Please note that the CF app is complaining about this patch, so a\n> > rebase is required. I have moved it to next CF, waiting on author,\n> > for now.\n>\n> Thanks for letting me know, I have rebased it on top of HEAD, the\n> attached v2 version has the rebased changes.\n\nThe patch was not applying on top of the HEAD, attached v3 version\nwhich has the rebased changes.\n\nRegards,\nVignesh", "msg_date": "Sat, 12 Mar 2022 13:29:06 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "On Sat, Mar 12, 2022 at 1:29 PM vignesh C <vignesh21@gmail.com> wrote:\n>\n> On Fri, Dec 3, 2021 at 3:21 PM vignesh C <vignesh21@gmail.com> wrote:\n> >\n> > On Fri, Dec 3, 2021 at 1:13 PM Michael Paquier <michael@paquier.xyz> wrote:\n> > >\n> > > On Thu, Aug 26, 2021 at 09:00:39PM +0530, vignesh C wrote:\n> > > > The previous patch was failing because of the recent test changes made\n> > > > by commit 201a76183e2 which unified new and get_new_node, attached\n> > > > patch has the changes to 
handle the changes accordingly.\n> > >\n> > > Please note that the CF app is complaining about this patch, so a\n> > > rebase is required. I have moved it to next CF, waiting on author,\n> > > for now.\n> >\n> > Thanks for letting me know, I have rebased it on top of HEAD, the\n> > attached v2 version has the rebased changes.\n>\n> The patch was not applying on top of the HEAD, attached v3 version\n> which has the rebased changes.\n\nThe patch needed to be rebased on top of HEAD because of commit\n\"0c20dd33db1607d6a85ffce24238c1e55e384b49\", attached a rebased v3\nversion for the changes of the same.\n\nRegards,\nVignesh", "msg_date": "Mon, 8 Aug 2022 12:10:53 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "vignesh C <vignesh21@gmail.com> writes:\n> [ v3-0001-Fix-for-invalidating-logical-replication-relation.patch ]\n\n(btw, please don't send multiple patch versions with the same number,\nit's very confusing.)\n\nI looked briefly at this patch. I wonder why you wrote a whole new\ncallback function instead of just using rel_sync_cache_publication_cb\nfor this case too.\n\nThe bigger picture here though is that in examples such as the one\nyou gave at the top of the thread, it's not very clear to me that\nthere's *any* principled behavior. If the connection between publisher\nand subscriber tables is only the relation name, fine ... but exactly\nwhich relation name applies? If you've got a transaction that is both\ninserting some data and renaming the table, it's really debatable which\ninsertions should be sent under which name(s). So how much should we\nactually care about such cases? 
Do we really want to force a cache flush\nany time somebody changes a (possibly unrelated) pg_namespace entry?\nWe could be giving up significant performance and not accomplishing much\nexcept changing from one odd behavior to a different one.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 04 Jan 2023 16:47:41 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "On Thu, 5 Jan 2023 at 03:17, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> vignesh C <vignesh21@gmail.com> writes:\n> > [ v3-0001-Fix-for-invalidating-logical-replication-relation.patch ]\n>\n> (btw, please don't send multiple patch versions with the same number,\n> it's very confusing.)\n\nSince it was just rebasing on top of HEAD, I did not change the\nversion, I will take care of this point in the later versions.\n\n> I looked briefly at this patch. I wonder why you wrote a whole new\n> callback function instead of just using rel_sync_cache_publication_cb\n> for this case too.\n\nYes we can use rel_sync_cache_publication_cb itself for the\ninvalidation of the relations, I have changed it.\n\n> The bigger picture here though is that in examples such as the one\n> you gave at the top of the thread, it's not very clear to me that\n> there's *any* principled behavior. If the connection between publisher\n> and subscriber tables is only the relation name, fine ... but exactly\n> which relation name applies? If you've got a transaction that is both\n> inserting some data and renaming the table, it's really debatable which\n> insertions should be sent under which name(s). So how much should we\n> actually care about such cases? 
Do we really want to force a cache flush\n> any time somebody changes a (possibly unrelated) pg_namespace entry?\n> We could be giving up significant performance and not accomplishing much\n> except changing from one odd behavior to a different one.\n\nThe connection between publisher and subscriber table is based on\nrelation id, During the first change relid, relname and schema name\nfrom publisher will be sent to the subscriber. Subscriber stores these\nid, relname and schema name in the LogicalRepRelMap hash for which\nrelation id is the key. Subsequent data received in the subscriber\nwill use the relation id received from the publisher and apply the\nchanges in the subscriber.\nThe problem does not stop even after the transaction that renames the\nschema is completed(Step3 in first mail). Even after the transaction\nis completed i.e after Step 3 the inserts of sch1.t1 and sch2.t1 both\nget replicated to sch1.t1 in the subscriber side. This happens because\nthe publisher id's of sch2.t1 and sch1.t1 are mapped to sch1.t1 in the\nsubscriber side and both inserts are successful.\nStep4) In Publisher\npostgres=# insert into sch2.t1 values(11);\nINSERT 0 1\npostgres=# insert into sch1.t1 values(12);\nINSERT 0 1\n\nStep5) In Subscriber\npostgres=# select * from sch1.t1;\n c1\n----\n 11\n 12\n(2 rows)\n\nDuring the sch1.t1 first insertion the relid, relname and schema name\nfrom publisher will be sent to the subscriber, this entry will be\nmapped to sch1.t1 in subscriber side and any insert from the publisher\nwill insert to sch1.t1.\nAfter the rename of schema(relid will not be changed) since this entry\nis not invalidated, even though we are inserting to sch2.t1 as the\nrelid is not changed, subscriber will continue to insert into sch1.t1\nin subscriber.\nDuring the first insert of new table sch1.t1, the relid, relname and\nschema name from publisher will be sent to the subscriber, this entry\nwill be again mapped to sch1.t1 in the subscriber side.\nSince both the 
entries sch1.t1 and sch2.t1 are mapped to sch1.t1 in\nthe subscriber side, both inserts will insert to the same table.\nThis issue will get fixed if we invalidate the relation and update the\nrelmap in the subscriber.\nI did not like the behavior where any insert on sch1.t1 or sch2.t1\nreplicates the changes to sch1.t1 in the subscriber. I felt it might\nbe better to fix this issue. I agree that the invalidations are\ncostly. If you feel this is a very corner case then we can skip it.\n\nAttached an updated patch.\n\nRegards,\nVignesh", "msg_date": "Thu, 5 Jan 2023 17:32:12 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "vignesh C <vignesh21@gmail.com> writes:\n> On Thu, 5 Jan 2023 at 03:17, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> The bigger picture here though is that in examples such as the one\n>> you gave at the top of the thread, it's not very clear to me that\n>> there's *any* principled behavior. If the connection between publisher\n>> and subscriber tables is only the relation name, fine ... but exactly\n>> which relation name applies?\n\n> The connection between publisher and subscriber table is based on\n> relation id, During the first change relid, relname and schema name\n> from publisher will be sent to the subscriber. Subscriber stores these\n> id, relname and schema name in the LogicalRepRelMap hash for which\n> relation id is the key. Subsequent data received in the subscriber\n> will use the relation id received from the publisher and apply the\n> changes in the subscriber.\n\nHm. I spent some time cleaning up this patch, and found that there's\nstill a problem. ISTM that the row with value \"3\" ought to end up\nin the subscriber's sch2.t1 table, but it does not: the attached\ntest script fails with\n\nt/100_bugs.pl .. 6/? 
\n# Failed test 'check data in subscriber sch2.t1 after schema rename'\n# at t/100_bugs.pl line 361.\n# got: ''\n# expected: '3'\n# Looks like you failed 1 test of 9.\nt/100_bugs.pl .. Dubious, test returned 1 (wstat 256, 0x100)\nFailed 1/9 subtests \n\nWhat's up with that?\n\n\t\t\tregards, tom lane", "msg_date": "Thu, 05 Jan 2023 18:02:31 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "On Fri, 6 Jan 2023 at 04:32, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> vignesh C <vignesh21@gmail.com> writes:\n> > On Thu, 5 Jan 2023 at 03:17, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> The bigger picture here though is that in examples such as the one\n> >> you gave at the top of the thread, it's not very clear to me that\n> >> there's *any* principled behavior. If the connection between publisher\n> >> and subscriber tables is only the relation name, fine ... but exactly\n> >> which relation name applies?\n>\n> > The connection between publisher and subscriber table is based on\n> > relation id, During the first change relid, relname and schema name\n> > from publisher will be sent to the subscriber. Subscriber stores these\n> > id, relname and schema name in the LogicalRepRelMap hash for which\n> > relation id is the key. Subsequent data received in the subscriber\n> > will use the relation id received from the publisher and apply the\n> > changes in the subscriber.\n>\n> Hm. I spent some time cleaning up this patch, and found that there's\n> still a problem. ISTM that the row with value \"3\" ought to end up\n> in the subscriber's sch2.t1 table, but it does not: the attached\n> test script fails with\n>\n> t/100_bugs.pl .. 6/?\n> # Failed test 'check data in subscriber sch2.t1 after schema rename'\n> # at t/100_bugs.pl line 361.\n> # got: ''\n> # expected: '3'\n> # Looks like you failed 1 test of 9.\n> t/100_bugs.pl .. 
Dubious, test returned 1 (wstat 256, 0x100)\n> Failed 1/9 subtests\n>\n> What's up with that?\n\nWhen the subscription is created, the subscriber will create a\nsubscription relation map of the corresponding relations from the\npublication. The subscription relation map will only have sch1.t1\nentry. As sch2.t1 was not present in the publisher when the\nsubscription was created, subscription will not have this entry in the\nsubscription relation map. So the insert operations performed on the\nnew table sch2.t1 will not be applied by the subscriber. We will have\nto refresh the publication using 'ALTER SUBSCRIPTION ... REFRESH\nPUBLICATION' to fetch missing table information from publisher. This\nwill start replication of tables that were added to the subscribed-to\npublications since CREATE SUBSCRIPTION or the last invocation of\nREFRESH PUBLICATION.\nI have modified the test to include 'ALTER SUBSCRIPTION ... REFRESH\nPUBLICATION' to get the new data. The test should expect 1 & 3 for\nsch2.t1 as the record with value 1 was already inserted before rename.\nThe updated v6 patch has the changes for the same.\n\nRegards,\nVignesh", "msg_date": "Fri, 6 Jan 2023 10:47:24 +0530", "msg_from": "vignesh C <vignesh21@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" }, { "msg_contents": "vignesh C <vignesh21@gmail.com> writes:\n> On Fri, 6 Jan 2023 at 04:32, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> Hm. I spent some time cleaning up this patch, and found that there's\n>> still a problem. ISTM that the row with value \"3\" ought to end up\n>> in the subscriber's sch2.t1 table, but it does not: the attached\n>> test script fails with\n>> ...\n>> What's up with that?\n\n> When the subscription is created, the subscriber will create a\n> subscription relation map of the corresponding relations from the\n> publication. The subscription relation map will only have sch1.t1\n> entry. 
As sch2.t1 was not present in the publisher when the\n> subscription was created, subscription will not have this entry in the\n> subscription relation map. So the insert operations performed on the\n> new table sch2.t1 will not be applied by the subscriber. We will have\n> to refresh the publication using 'ALTER SUBSCRIPTION ... REFRESH\n> PUBLICATION' to fetch missing table information from publisher. This\n> will start replication of tables that were added to the subscribed-to\n> publications since CREATE SUBSCRIPTION or the last invocation of\n> REFRESH PUBLICATION.\n\nBut ... but ... but ... that's the exact opposite of what the test\ncase shows to be happening. To wit, the newly created table\n(the second coming of sch1.t1) *is* replicated immediately, while\nthe pre-existing t1 (now sch2.t1) is not. It's impossible to\nexplain those two facts under either a model of \"tables are matched\nby name\" or \"tables are matched by OID\". So I'm still of the opinion\nthat there's some very dubious behavior here.\n\nHowever, it does seem that the cache flush makes one aspect better,\nso I pushed this after a little further work on the test case.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Fri, 06 Jan 2023 11:21:28 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Logical replication - schema change not invalidating the relation\n cache" } ]
[ { "msg_contents": "In my git workflow, I normally use scripts to simplify and check things.\nPreviously, most of my workfload was on master, with patches migrated to\nappropriate back branches.\n\nFYI, now that we have the release notes only in the major version\nbranches, I have had to adjust my scripts to allow for more\nper-major-version branches and automated doc builds of back branches. I\nthought people might like to know if they come upon the same issue.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Fri, 2 Jul 2021 13:34:05 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": true, "msg_subject": "Back-branch commit complexity" } ]
[ { "msg_contents": "On Fri, 2 Jul 2021 at 22:55, Dean Rasheed\n<dean(dot)a(dot)rasheed(at)gmail(dot)com> wrote:\n> Here's an update with the\n> last set of changes discussed.\nIf you allow me a small suggestion.\nMove the initializations of the variable tmp_var to after check if the\nfunction can run.\nSaves some cycles, when not running.\n\n /* Ensure we disallow calling when not in aggregate context */\n if (!AggCheckCallContext(fcinfo, NULL))\n elog(ERROR, \"aggregate function called in non-aggregate context\");\n\n+ init_var(&tmp_var);\n+\n\nregards,\nRanier Vilela\n\n\nOn Fri, 2 Jul 2021 at 22:55, Dean Rasheed <dean(dot)a(dot)rasheed(at)gmail(dot)com> wrote:> Here's an update with the> last set of changes discussed.\n\n\nIf you allow me a small suggestion.Move the initializations of the variable tmp_var to after check if the function can run.Saves some cycles, when not running. \t/* Ensure we disallow calling when not in aggregate context */ \tif (!AggCheckCallContext(fcinfo, NULL)) \t\telog(ERROR, \"aggregate function called in non-aggregate context\");\n+\tinit_var(&tmp_var);+regards,Ranier Vilela", "msg_date": "Fri, 2 Jul 2021 15:48:22 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Fri, 2 Jul 2021 at 19:48, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> If you allow me a small suggestion.\n> Move the initializations of the variable tmp_var to after check if the function can run.\n> Saves some cycles, when not running.\n>\n\nOK, thanks. 
I agree, on grounds of neatness and consistency with\nnearby code, so I've done it that way.\n\nNote, however, that it won't make any difference to performance in the\nway that you're suggesting -- elog() in Postgres is used for \"should\nnever happen, unless there's a software bug\" errors, rather than, say,\n\"might happen for certain invalid inputs\" errors, so init_var() should\nalways be called in these functions.\n\nRegards,\nDean\n\n\n", "msg_date": "Mon, 5 Jul 2021 10:44:11 +0100", "msg_from": "Dean Rasheed <dean.a.rasheed@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "Em seg., 5 de jul. de 2021 às 06:44, Dean Rasheed <dean.a.rasheed@gmail.com>\nescreveu:\n\n> On Fri, 2 Jul 2021 at 19:48, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> >\n> > If you allow me a small suggestion.\n> > Move the initializations of the variable tmp_var to after check if the\n> function can run.\n> > Saves some cycles, when not running.\n> >\n>\n> OK, thanks. I agree, on grounds of neatness and consistency with\n> nearby code, so I've done it that way.\n>\nThanks.\n\n\n> Note, however, that it won't make any difference to performance in the\n> way that you're suggesting -- elog() in Postgres is used for \"should\n> never happen, unless there's a software bug\" errors, rather than, say,\n> \"might happen for certain invalid inputs\" errors, so init_var() should\n> always be called in these functions.\n>\nI agree that in this case, most of the time, elog is not called.\nBut by writing this way, you are following the principle of not doing\nunnecessary work until it is absolutely necessary.\nIf you follow this principle, in general, the performance will always be\nbetter.\n\nregards,\nRanier Vilela\n\nEm seg., 5 de jul. 
de 2021 às 06:44, Dean Rasheed <dean.a.rasheed@gmail.com> escreveu:On Fri, 2 Jul 2021 at 19:48, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> If you allow me a small suggestion.\n> Move the initializations of the variable tmp_var to after check if the function can run.\n> Saves some cycles, when not running.\n>\n\nOK, thanks. I agree, on grounds of neatness and consistency with\nnearby code, so I've done it that way.Thanks. \n\nNote, however, that it won't make any difference to performance in the\nway that you're suggesting -- elog() in Postgres is used for \"should\nnever happen, unless there's a software bug\" errors, rather than, say,\n\"might happen for certain invalid inputs\" errors, so init_var() should\nalways be called in these functions.I agree that in this case, most of the time, elog is not called.But by writing this way, you are following the principle of not doing unnecessary work until it is absolutely necessary.If you follow this principle, in general, the performance will always be better.regards,Ranier Vilela", "msg_date": "Mon, 5 Jul 2021 08:07:35 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "On Mon, 5 Jul 2021 at 23:07, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> Em seg., 5 de jul. de 2021 às 06:44, Dean Rasheed <dean.a.rasheed@gmail.com> escreveu:\n>> Note, however, that it won't make any difference to performance in the\n>> way that you're suggesting -- elog() in Postgres is used for \"should\n>> never happen, unless there's a software bug\" errors, rather than, say,\n>> \"might happen for certain invalid inputs\" errors, so init_var() should\n>> always be called in these functions.\n>\n> I agree that in this case, most of the time, elog is not called.\n\nYou may have misunderstood what Dean meant. elog(ERROR) calls are now\nexclusively for \"cannot happen\" cases. 
If someone gets one of these\nthen there's a bug to fix or something else serious has gone wrong\nwith the hardware.\n\nThe case you seem to be talking about would fit better if the code in\nquestion had been ereport(ERROR).\n\nI don't disagree that the initialisation is better to happen after the\nelog. I'm just mentioning this as I wanted to make sure you knew the\ndifference between elog(ERROR) and ereport(ERROR).\n\nDavid\n\n\n", "msg_date": "Tue, 6 Jul 2021 00:01:35 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Numeric multiplication overflow errors" }, { "msg_contents": "Em seg., 5 de jul. de 2021 às 09:02, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Mon, 5 Jul 2021 at 23:07, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> >\n> > Em seg., 5 de jul. de 2021 às 06:44, Dean Rasheed <\n> dean.a.rasheed@gmail.com> escreveu:\n> >> Note, however, that it won't make any difference to performance in the\n> >> way that you're suggesting -- elog() in Postgres is used for \"should\n> >> never happen, unless there's a software bug\" errors, rather than, say,\n> >> \"might happen for certain invalid inputs\" errors, so init_var() should\n> >> always be called in these functions.\n> >\n> > I agree that in this case, most of the time, elog is not called.\n>\n> You may have misunderstood what Dean meant. elog(ERROR) calls are now\n> exclusively for \"cannot happen\" cases. If someone gets one of these\n> then there's a bug to fix or something else serious has gone wrong\n> with the hardware.\n>\n> The case you seem to be talking about would fit better if the code in\n> question had been ereport(ERROR).\n>\n> I don't disagree that the initialisation is better to happen after the\n> elog. I'm just mentioning this as I wanted to make sure you knew the\n> difference between elog(ERROR) and ereport(ERROR).\n>\nI understand the difference now, thanks for clarifying.\n\nregards,\nRanier Vilela\n\nEm seg., 5 de jul. 
de 2021 às 09:02, David Rowley <dgrowleyml@gmail.com> escreveu:On Mon, 5 Jul 2021 at 23:07, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> Em seg., 5 de jul. de 2021 às 06:44, Dean Rasheed <dean.a.rasheed@gmail.com> escreveu:\n>> Note, however, that it won't make any difference to performance in the\n>> way that you're suggesting -- elog() in Postgres is used for \"should\n>> never happen, unless there's a software bug\" errors, rather than, say,\n>> \"might happen for certain invalid inputs\" errors, so init_var() should\n>> always be called in these functions.\n>\n> I agree that in this case, most of the time, elog is not called.\n\nYou may have misunderstood what Dean meant.  elog(ERROR) calls are now\nexclusively for \"cannot happen\" cases.  If someone gets one of these\nthen there's a bug to fix or something else serious has gone wrong\nwith the hardware.\n\nThe case you seem to be talking about would fit better if the code in\nquestion had been ereport(ERROR).\n\nI don't disagree that the initialisation is better to happen after the\nelog. I'm just mentioning this as I wanted to make sure you knew the\ndifference between elog(ERROR) and ereport(ERROR).I understand the difference now, thanks for clarifying.regards,Ranier Vilela", "msg_date": "Mon, 5 Jul 2021 09:10:00 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Numeric multiplication overflow errors" } ]
[ { "msg_contents": "Don't try to print data type names in slot_store_error_callback().\n\nThe existing code tried to do syscache lookups in an already-failed\ntransaction, which is problematic to say the least. After some\nconsideration of alternatives, the best fix seems to be to just drop\ntype names from the error message altogether. The table and column\nnames seem like sufficient localization. If the user is unsure what\ntypes are involved, she can check the local and remote table\ndefinitions.\n\nHaving done that, we can also discard the LogicalRepTypMap hash\ntable, which had no other use. Arguably, LOGICAL_REP_MSG_TYPE\nreplication messages are now obsolete as well; but we should\nprobably keep them in case some other use emerges. (The complexity\nof removing something from the replication protocol would likely\noutweigh any savings anyhow.)\n\nMasahiko Sawada and Bharath Rupireddy, per complaint from Andres\nFreund. Back-patch to v10 where this code originated.\n\nDiscussion: https://postgr.es/m/20210106020229.ne5xnuu6wlondjpe@alap3.anarazel.de\n\nBranch\n------\nREL_10_STABLE\n\nDetails\n-------\nhttps://git.postgresql.org/pg/commitdiff/0b5089e8c9795cd9611ef5dfee756c97e14cfe75\n\nModified Files\n--------------\nsrc/backend/replication/logical/relation.c | 104 +----------------------------\nsrc/backend/replication/logical/worker.c | 32 ++-------\nsrc/include/replication/logicalrelation.h | 3 -\n3 files changed, 6 insertions(+), 133 deletions(-)", "msg_date": "Fri, 02 Jul 2021 20:05:33 +0000", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "pgsql: Don't try to print data type names in\n slot_store_error_callback(" }, { "msg_contents": "On Fri, Jul 2, 2021 at 4:05 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> The existing code tried to do syscache lookups in an already-failed\n> transaction, which is problematic to say the least.\n\nWhy didn't the assertion in SearchCatCacheInternal() catch this? 
Was\nthis code never actually tested?\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Thu, 8 Jul 2021 10:57:22 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: pgsql: Don't try to print data type names in\n slot_store_error_callback(" }, { "msg_contents": "Robert Haas <robertmhaas@gmail.com> writes:\n> On Fri, Jul 2, 2021 at 4:05 PM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> The existing code tried to do syscache lookups in an already-failed\n>> transaction, which is problematic to say the least.\n\n> Why didn't the assertion in SearchCatCacheInternal() catch this? Was\n> this code never actually tested?\n\nIt wasn't tested under conditions in which a catalog lookup would\nbe at real risk of failing --- and I'm not sure that we have any\nway to do that mechanically. See discussion downthread about when\na transaction could be considered \"failed\".\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 08 Jul 2021 11:21:48 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: pgsql: Don't try to print data type names in\n slot_store_error_callback(" } ]
[ { "msg_contents": "I've done a experimental tool to convert bison grammars to a kind of \nEBNF understood byhttps://www.bottlecaps.de/rr/ui \n<https://www.bottlecaps.de/rr/ui>to generate railroad diagrams see \nbellow the converted'postgresql-13.3/src/backend/parser/gram.y' and with \nsome hand made changes to allow view it \nathttps://www.bottlecaps.de/rr/ui <https://www.bottlecaps.de/rr/ui>the \norder of the rules could be changed to a better view of the railroad \ndiagrams. Copy and paste the EBNF bellow \nonhttps://www.bottlecaps.de/rr/ui <https://www.bottlecaps.de/rr/ui>tab \nEdit Grammar then switch to the tab View Diagram.\n\n====\n\n/*\n From postgresql-13.3/src/backend/parser/gram.y\n*/\n\nstmtblock ::= stmtmulti\nstmtmulti ::= stmtmulti ';' stmt | stmt\nstmt ::= AlterEventTrigStmt | AlterCollationStmt | AlterDatabaseStmt | \nAlterDatabaseSetStmt | AlterDefaultPrivilegesStmt | AlterDomainStmt | \nAlterEnumStmt | AlterExtensionStmt | AlterExtensionContentsStmt | \nAlterFdwStmt | AlterForeignServerStmt | AlterForeignTableStmt | \nAlterFunctionStmt | AlterGroupStmt | AlterObjectDependsStmt | \nAlterObjectSchemaStmt | AlterOwnerStmt | AlterOperatorStmt | \nAlterTypeStmt | AlterPolicyStmt | AlterSeqStmt | AlterSystemStmt | \nAlterTableStmt | AlterTblSpcStmt | AlterCompositeTypeStmt | \nAlterPublicationStmt | AlterRoleSetStmt | AlterRoleStmt | \nAlterSubscriptionStmt | AlterStatsStmt | AlterTSConfigurationStmt | \nAlterTSDictionaryStmt | AlterUserMappingStmt | AnalyzeStmt | CallStmt | \nCheckPointStmt | ClosePortalStmt | ClusterStmt | CommentStmt | \nConstraintsSetStmt | CopyStmt | CreateAmStmt | CreateAsStmt | \nCreateAssertionStmt | CreateCastStmt | CreateConversionStmt | \nCreateDomainStmt | CreateExtensionStmt | CreateFdwStmt | \nCreateForeignServerStmt | CreateForeignTableStmt | CreateFunctionStmt | \nCreateGroupStmt | CreateMatViewStmt | CreateOpClassStmt | \nCreateOpFamilyStmt | CreatePublicationStmt | AlterOpFamilyStmt | \nCreatePolicyStmt | 
CreatePLangStmt | CreateSchemaStmt | CreateSeqStmt | \nCreateStmt | CreateSubscriptionStmt | CreateStatsStmt | \nCreateTableSpaceStmt | CreateTransformStmt | CreateTrigStmt | \nCreateEventTrigStmt | CreateRoleStmt | CreateUserStmt | \nCreateUserMappingStmt | CreatedbStmt | DeallocateStmt | \nDeclareCursorStmt | DefineStmt | DeleteStmt | DiscardStmt | DoStmt | \nDropCastStmt | DropOpClassStmt | DropOpFamilyStmt | DropOwnedStmt | \nDropPLangStmt | DropStmt | DropSubscriptionStmt | DropTableSpaceStmt | \nDropTransformStmt | DropRoleStmt | DropUserMappingStmt | DropdbStmt | \nExecuteStmt | ExplainStmt | FetchStmt | GrantStmt | GrantRoleStmt | \nImportForeignSchemaStmt | IndexStmt | InsertStmt | ListenStmt | \nRefreshMatViewStmt | LoadStmt | LockStmt | NotifyStmt | PrepareStmt | \nReassignOwnedStmt | ReindexStmt | RemoveAggrStmt | RemoveFuncStmt | \nRemoveOperStmt | RenameStmt | RevokeStmt | RevokeRoleStmt | RuleStmt | \nSecLabelStmt | SelectStmt | TransactionStmt | TruncateStmt | \nUnlistenStmt | UpdateStmt | VacuumStmt | VariableResetStmt | \nVariableSetStmt | VariableShowStmt | ViewStmt |\nCallStmt ::= CALL func_application\nCreateRoleStmt ::= CREATE ROLE RoleId opt_with OptRoleList\nopt_with ::= WITH | WITH_LA |\nOptRoleList ::= OptRoleList CreateOptRoleElem |\nAlterOptRoleList ::= AlterOptRoleList AlterOptRoleElem |\nAlterOptRoleElem ::= PASSWORD Sconst | PASSWORD NULL_P | ENCRYPTED \nPASSWORD Sconst | UNENCRYPTED PASSWORD Sconst | INHERIT | CONNECTION \nLIMIT SignedIconst | VALID UNTIL Sconst | USER role_list | IDENT\nCreateOptRoleElem ::= AlterOptRoleElem | SYSID Iconst | ADMIN role_list \n| ROLE role_list | IN_P ROLE role_list | IN_P GROUP_P role_list\nCreateUserStmt ::= CREATE USER RoleId opt_with OptRoleList\nAlterRoleStmt ::= ALTER ROLE RoleSpec opt_with AlterOptRoleList | ALTER \nUSER RoleSpec opt_with AlterOptRoleList\nopt_in_database ::= | IN_P DATABASE database_name\nAlterRoleSetStmt ::= ALTER ROLE RoleSpec opt_in_database SetResetClause \n| ALTER ROLE 
ALL opt_in_database SetResetClause | ALTER USER RoleSpec \nopt_in_database SetResetClause | ALTER USER ALL opt_in_database \nSetResetClause\nDropRoleStmt ::= DROP ROLE role_list | DROP ROLE IF_P EXISTS role_list | \nDROP USER role_list | DROP USER IF_P EXISTS role_list | DROP GROUP_P \nrole_list | DROP GROUP_P IF_P EXISTS role_list\nCreateGroupStmt ::= CREATE GROUP_P RoleId opt_with OptRoleList\nAlterGroupStmt ::= ALTER GROUP_P RoleSpec add_drop USER role_list\nadd_drop ::= ADD_P | DROP\nCreateSchemaStmt ::= CREATE SCHEMA OptSchemaName AUTHORIZATION RoleSpec \nOptSchemaEltList | CREATE SCHEMA ColId OptSchemaEltList | CREATE SCHEMA \nIF_P NOT EXISTS OptSchemaName AUTHORIZATION RoleSpec OptSchemaEltList | \nCREATE SCHEMA IF_P NOT EXISTS ColId OptSchemaEltList\nOptSchemaName ::= ColId |\nOptSchemaEltList ::= OptSchemaEltList schema_stmt |\nschema_stmt ::= CreateStmt | IndexStmt | CreateSeqStmt | CreateTrigStmt \n| GrantStmt | ViewStmt\nVariableSetStmt ::= SET set_rest | SET LOCAL set_rest | SET SESSION set_rest\nset_rest ::= TRANSACTION transaction_mode_list | SESSION CHARACTERISTICS \nAS TRANSACTION transaction_mode_list | set_rest_more\ngeneric_set ::= var_name TO var_list | var_name '=' var_list | var_name \nTO DEFAULT | var_name '=' DEFAULT\nset_rest_more ::= generic_set | var_name FROM CURRENT_P | TIME ZONE \nzone_value | CATALOG_P Sconst | SCHEMA Sconst | NAMES opt_encoding | \nROLE NonReservedWord_or_Sconst | SESSION AUTHORIZATION \nNonReservedWord_or_Sconst | SESSION AUTHORIZATION DEFAULT | XML_P OPTION \ndocument_or_content | TRANSACTION SNAPSHOT Sconst\nvar_name ::= ColId | var_name '.' 
ColId\nvar_list ::= var_value | var_list ',' var_value\nvar_value ::= opt_boolean_or_string | NumericOnly\niso_level ::= READ UNCOMMITTED | READ COMMITTED | REPEATABLE READ | \nSERIALIZABLE\nopt_boolean_or_string ::= TRUE_P | FALSE_P | ON | NonReservedWord_or_Sconst\nzone_value ::= Sconst | IDENT | ConstInterval Sconst opt_interval | \nConstInterval '(' Iconst ')' Sconst | NumericOnly | DEFAULT | LOCAL\nopt_encoding ::= Sconst | DEFAULT |\nNonReservedWord_or_Sconst ::= NonReservedWord | Sconst\nVariableResetStmt ::= RESET reset_rest\nreset_rest ::= generic_reset | TIME ZONE | TRANSACTION ISOLATION LEVEL | \nSESSION AUTHORIZATION\ngeneric_reset ::= var_name | ALL\nSetResetClause ::= SET set_rest | VariableResetStmt\nFunctionSetResetClause ::= SET set_rest_more | VariableResetStmt\nVariableShowStmt ::= SHOW var_name | SHOW TIME ZONE | SHOW TRANSACTION \nISOLATION LEVEL | SHOW SESSION AUTHORIZATION | SHOW ALL\nConstraintsSetStmt ::= SET CONSTRAINTS constraints_set_list \nconstraints_set_mode\nconstraints_set_list ::= ALL | qualified_name_list\nconstraints_set_mode ::= DEFERRED | IMMEDIATE\nCheckPointStmt ::= CHECKPOINT\nDiscardStmt ::= DISCARD ALL | DISCARD TEMP | DISCARD TEMPORARY | DISCARD \nPLANS | DISCARD SEQUENCES\nAlterTableStmt ::= ALTER TABLE relation_expr alter_table_cmds | ALTER \nTABLE IF_P EXISTS relation_expr alter_table_cmds | ALTER TABLE \nrelation_expr partition_cmd | ALTER TABLE IF_P EXISTS relation_expr \npartition_cmd | ALTER TABLE ALL IN_P TABLESPACE name SET TABLESPACE name \nopt_nowait | ALTER TABLE ALL IN_P TABLESPACE name OWNED BY role_list SET \nTABLESPACE name opt_nowait | ALTER INDEX qualified_name alter_table_cmds \n| ALTER INDEX IF_P EXISTS qualified_name alter_table_cmds | ALTER INDEX \nqualified_name index_partition_cmd | ALTER INDEX ALL IN_P TABLESPACE \nname SET TABLESPACE name opt_nowait | ALTER INDEX ALL IN_P TABLESPACE \nname OWNED BY role_list SET TABLESPACE name opt_nowait | ALTER SEQUENCE \nqualified_name alter_table_cmds | ALTER 
SEQUENCE IF_P EXISTS \nqualified_name alter_table_cmds | ALTER VIEW qualified_name \nalter_table_cmds | ALTER VIEW IF_P EXISTS qualified_name \nalter_table_cmds | ALTER MATERIALIZED VIEW qualified_name \nalter_table_cmds | ALTER MATERIALIZED VIEW IF_P EXISTS qualified_name \nalter_table_cmds | ALTER MATERIALIZED VIEW ALL IN_P TABLESPACE name SET \nTABLESPACE name opt_nowait | ALTER MATERIALIZED VIEW ALL IN_P TABLESPACE \nname OWNED BY role_list SET TABLESPACE name opt_nowait\nalter_table_cmds ::= alter_table_cmd | alter_table_cmds ',' alter_table_cmd\npartition_cmd ::= ATTACH PARTITION qualified_name PartitionBoundSpec | \nDETACH PARTITION qualified_name\nindex_partition_cmd ::= ATTACH PARTITION qualified_name\nalter_table_cmd ::= ADD_P columnDef | ADD_P IF_P NOT EXISTS columnDef | \nADD_P COLUMN columnDef | ADD_P COLUMN IF_P NOT EXISTS columnDef | ALTER \nopt_column ColId alter_column_default | ALTER opt_column ColId DROP NOT \nNULL_P | ALTER opt_column ColId SET NOT NULL_P | ALTER opt_column ColId \nDROP EXPRESSION | ALTER opt_column ColId DROP EXPRESSION IF_P EXISTS | \nALTER opt_column ColId SET STATISTICS SignedIconst | ALTER opt_column \nIconst SET STATISTICS SignedIconst | ALTER opt_column ColId SET \nreloptions | ALTER opt_column ColId RESET reloptions | ALTER opt_column \nColId SET STORAGE ColId | ALTER opt_column ColId ADD_P GENERATED \ngenerated_when AS IDENTITY_P OptParenthesizedSeqOptList | ALTER \nopt_column ColId alter_identity_column_option_list | ALTER opt_column \nColId DROP IDENTITY_P | ALTER opt_column ColId DROP IDENTITY_P IF_P \nEXISTS | DROP opt_column IF_P EXISTS ColId opt_drop_behavior | DROP \nopt_column ColId opt_drop_behavior | ALTER opt_column ColId opt_set_data \nTYPE_P Typename opt_collate_clause alter_using | ALTER opt_column ColId \nalter_generic_options | ADD_P TableConstraint | ALTER CONSTRAINT name \nConstraintAttributeSpec | VALIDATE CONSTRAINT name | DROP CONSTRAINT \nIF_P EXISTS name opt_drop_behavior | DROP CONSTRAINT name 
\nopt_drop_behavior | SET WITHOUT OIDS | CLUSTER ON name | SET WITHOUT \nCLUSTER | SET LOGGED | SET UNLOGGED | ENABLE_P TRIGGER name | ENABLE_P \nALWAYS TRIGGER name | ENABLE_P REPLICA TRIGGER name | ENABLE_P TRIGGER \nALL | ENABLE_P TRIGGER USER | DISABLE_P TRIGGER name | DISABLE_P TRIGGER \nALL | DISABLE_P TRIGGER USER | ENABLE_P RULE name | ENABLE_P ALWAYS RULE \nname | ENABLE_P REPLICA RULE name | DISABLE_P RULE name | INHERIT \nqualified_name | NO INHERIT qualified_name | OF any_name | NOT OF | \nOWNER TO RoleSpec | SET TABLESPACE name | SET reloptions | RESET \nreloptions | REPLICA IDENTITY_P replica_identity | ENABLE_P ROW LEVEL \nSECURITY | DISABLE_P ROW LEVEL SECURITY | FORCE ROW LEVEL SECURITY | NO \nFORCE ROW LEVEL SECURITY | alter_generic_options\nalter_column_default ::= SET DEFAULT a_expr | DROP DEFAULT\nopt_drop_behavior ::= CASCADE | RESTRICT |\nopt_collate_clause ::= COLLATE any_name |\nalter_using ::= USING a_expr |\nreplica_identity ::= NOTHING | FULL | DEFAULT | USING INDEX name\nreloptions ::= '(' reloption_list ')'\nopt_reloptions ::= WITH reloptions |\nreloption_list ::= reloption_elem | reloption_list ',' reloption_elem\nreloption_elem ::= ColLabel '=' def_arg | ColLabel | ColLabel '.' \nColLabel '=' def_arg | ColLabel '.' 
ColLabel\nalter_identity_column_option_list ::= alter_identity_column_option | \nalter_identity_column_option_list alter_identity_column_option\nalter_identity_column_option ::= RESTART | RESTART opt_with NumericOnly \n| SET SeqOptElem | SET GENERATED generated_when\nPartitionBoundSpec ::= FOR VALUES WITH '(' hash_partbound ')' | FOR \nVALUES IN_P '(' expr_list ')' | FOR VALUES FROM '(' expr_list ')' TO '(' \nexpr_list ')' | DEFAULT\nhash_partbound_elem ::= NonReservedWord Iconst\nhash_partbound ::= hash_partbound_elem | hash_partbound ',' \nhash_partbound_elem\nAlterCompositeTypeStmt ::= ALTER TYPE_P any_name alter_type_cmds\nalter_type_cmds ::= alter_type_cmd | alter_type_cmds ',' alter_type_cmd\nalter_type_cmd ::= ADD_P ATTRIBUTE TableFuncElement opt_drop_behavior | \nDROP ATTRIBUTE IF_P EXISTS ColId opt_drop_behavior | DROP ATTRIBUTE \nColId opt_drop_behavior | ALTER ATTRIBUTE ColId opt_set_data TYPE_P \nTypename opt_collate_clause opt_drop_behavior\nClosePortalStmt ::= CLOSE cursor_name | CLOSE ALL\nCopyStmt ::= COPY opt_binary qualified_name opt_column_list copy_from \nopt_program copy_file_name copy_delimiter opt_with copy_options \nwhere_clause | COPY '(' PreparableStmt ')' TO opt_program copy_file_name \nopt_with copy_options\ncopy_from ::= FROM | TO\nopt_program ::= PROGRAM |\ncopy_file_name ::= Sconst | STDIN | STDOUT\ncopy_options ::= copy_opt_list | '(' copy_generic_opt_list ')'\ncopy_opt_list ::= copy_opt_list copy_opt_item |\ncopy_opt_item ::= BINARY | FREEZE | DELIMITER opt_as Sconst | NULL_P \nopt_as Sconst | CSV | HEADER_P | QUOTE opt_as Sconst | ESCAPE opt_as \nSconst | FORCE QUOTE columnList | FORCE QUOTE '*' | FORCE NOT NULL_P \ncolumnList | FORCE NULL_P columnList | ENCODING Sconst\nopt_binary ::= BINARY |\ncopy_delimiter ::= opt_using DELIMITERS Sconst |\nopt_using ::= USING |\ncopy_generic_opt_list ::= copy_generic_opt_elem | copy_generic_opt_list \n',' copy_generic_opt_elem\ncopy_generic_opt_elem ::= ColLabel 
copy_generic_opt_arg\ncopy_generic_opt_arg ::= opt_boolean_or_string | NumericOnly | '*' | '(' \ncopy_generic_opt_arg_list ')' |\ncopy_generic_opt_arg_list ::= copy_generic_opt_arg_list_item | \ncopy_generic_opt_arg_list ',' copy_generic_opt_arg_list_item\ncopy_generic_opt_arg_list_item ::= opt_boolean_or_string\nCreateStmt ::= CREATE OptTemp TABLE qualified_name '(' \nOptTableElementList ')' OptInherit OptPartitionSpec \ntable_access_method_clause OptWith OnCommitOption OptTableSpace | CREATE \nOptTemp TABLE IF_P NOT EXISTS qualified_name '(' OptTableElementList ')' \nOptInherit OptPartitionSpec table_access_method_clause OptWith \nOnCommitOption OptTableSpace | CREATE OptTemp TABLE qualified_name OF \nany_name OptTypedTableElementList OptPartitionSpec \ntable_access_method_clause OptWith OnCommitOption OptTableSpace | CREATE \nOptTemp TABLE IF_P NOT EXISTS qualified_name OF any_name \nOptTypedTableElementList OptPartitionSpec table_access_method_clause \nOptWith OnCommitOption OptTableSpace | CREATE OptTemp TABLE \nqualified_name PARTITION OF qualified_name OptTypedTableElementList \nPartitionBoundSpec OptPartitionSpec table_access_method_clause OptWith \nOnCommitOption OptTableSpace | CREATE OptTemp TABLE IF_P NOT EXISTS \nqualified_name PARTITION OF qualified_name OptTypedTableElementList \nPartitionBoundSpec OptPartitionSpec table_access_method_clause OptWith \nOnCommitOption OptTableSpace\nOptTemp ::= TEMPORARY | TEMP | LOCAL TEMPORARY | LOCAL TEMP | GLOBAL \nTEMPORARY | GLOBAL TEMP | UNLOGGED |\nOptTableElementList ::= TableElementList |\nOptTypedTableElementList ::= '(' TypedTableElementList ')' |\nTableElementList ::= TableElement | TableElementList ',' TableElement\nTypedTableElementList ::= TypedTableElement | TypedTableElementList ',' \nTypedTableElement\nTableElement ::= columnDef | TableLikeClause | TableConstraint\nTypedTableElement ::= columnOptions | TableConstraint\ncolumnDef ::= ColId Typename create_generic_options ColQualList\ncolumnOptions ::= 
ColId ColQualList | ColId WITH OPTIONS ColQualList\nColQualList ::= ColQualList ColConstraint |\nColConstraint ::= CONSTRAINT name ColConstraintElem | ColConstraintElem \n| ConstraintAttr | COLLATE any_name\nColConstraintElem ::= NOT NULL_P | NULL_P | UNIQUE opt_definition \nOptConsTableSpace | PRIMARY KEY opt_definition OptConsTableSpace | CHECK \n'(' a_expr ')' opt_no_inherit | DEFAULT b_expr | GENERATED \ngenerated_when AS IDENTITY_P OptParenthesizedSeqOptList | GENERATED \ngenerated_when AS '(' a_expr ')' STORED | REFERENCES qualified_name \nopt_column_list key_match key_actions\ngenerated_when ::= ALWAYS | BY DEFAULT\nConstraintAttr ::= DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | \nINITIALLY IMMEDIATE\nTableLikeClause ::= LIKE qualified_name TableLikeOptionList\nTableLikeOptionList ::= TableLikeOptionList INCLUDING TableLikeOption | \nTableLikeOptionList EXCLUDING TableLikeOption |\nTableLikeOption ::= COMMENTS | CONSTRAINTS | DEFAULTS | IDENTITY_P | \nGENERATED | INDEXES | STATISTICS | STORAGE | ALL\nTableConstraint ::= CONSTRAINT name ConstraintElem | ConstraintElem\nConstraintElem ::= CHECK '(' a_expr ')' ConstraintAttributeSpec | UNIQUE \n'(' columnList ')' opt_c_include opt_definition OptConsTableSpace \nConstraintAttributeSpec | UNIQUE ExistingIndex ConstraintAttributeSpec | \nPRIMARY KEY '(' columnList ')' opt_c_include opt_definition \nOptConsTableSpace ConstraintAttributeSpec | PRIMARY KEY ExistingIndex \nConstraintAttributeSpec | EXCLUDE access_method_clause '(' \nExclusionConstraintList ')' opt_c_include opt_definition \nOptConsTableSpace ExclusionWhereClause ConstraintAttributeSpec | FOREIGN \nKEY '(' columnList ')' REFERENCES qualified_name opt_column_list \nkey_match key_actions ConstraintAttributeSpec\nopt_no_inherit ::= NO INHERIT |\nopt_column_list ::= '(' columnList ')' |\ncolumnList ::= columnElem | columnList ',' columnElem\ncolumnElem ::= ColId\nopt_c_include ::= INCLUDE '(' columnList ')' |\nkey_match ::= MATCH FULL | MATCH PARTIAL 
| MATCH SIMPLE |\nExclusionConstraintList ::= ExclusionConstraintElem | \nExclusionConstraintList ',' ExclusionConstraintElem\nExclusionConstraintElem ::= index_elem WITH any_operator | index_elem \nWITH OPERATOR '(' any_operator ')'\nExclusionWhereClause ::= WHERE '(' a_expr ')' |\nkey_actions ::= key_update | key_delete | key_update key_delete | \nkey_delete key_update |\nkey_update ::= ON UPDATE key_action\nkey_delete ::= ON DELETE_P key_action\nkey_action ::= NO ACTION | RESTRICT | CASCADE | SET NULL_P | SET DEFAULT\nOptInherit ::= INHERITS '(' qualified_name_list ')' |\nOptPartitionSpec ::= PartitionSpec |\nPartitionSpec ::= PARTITION BY ColId '(' part_params ')'\npart_params ::= part_elem | part_params ',' part_elem\npart_elem ::= ColId opt_collate opt_class | func_expr_windowless \nopt_collate opt_class | '(' a_expr ')' opt_collate opt_class\ntable_access_method_clause ::= USING access_method |\nOptWith ::= WITH reloptions | WITHOUT OIDS |\nOnCommitOption ::= ON COMMIT DROP | ON COMMIT DELETE_P ROWS | ON COMMIT \nPRESERVE ROWS |\nOptTableSpace ::= TABLESPACE name |\nOptConsTableSpace ::= USING INDEX TABLESPACE name |\nExistingIndex ::= USING INDEX index_name\nCreateStatsStmt ::= CREATE STATISTICS any_name opt_name_list ON \nexpr_list FROM from_list | CREATE STATISTICS IF_P NOT EXISTS any_name \nopt_name_list ON expr_list FROM from_list\nAlterStatsStmt ::= ALTER STATISTICS any_name SET STATISTICS SignedIconst \n| ALTER STATISTICS IF_P EXISTS any_name SET STATISTICS SignedIconst\nCreateAsStmt ::= CREATE OptTemp TABLE create_as_target AS SelectStmt \nopt_with_data | CREATE OptTemp TABLE IF_P NOT EXISTS create_as_target AS \nSelectStmt opt_with_data\ncreate_as_target ::= qualified_name opt_column_list \ntable_access_method_clause OptWith OnCommitOption OptTableSpace\nopt_with_data ::= WITH DATA_P | WITH NO DATA_P |\nCreateMatViewStmt ::= CREATE OptNoLog MATERIALIZED VIEW create_mv_target \nAS SelectStmt opt_with_data | CREATE OptNoLog MATERIALIZED VIEW IF_P NOT 
\nEXISTS create_mv_target AS SelectStmt opt_with_data\ncreate_mv_target ::= qualified_name opt_column_list \ntable_access_method_clause opt_reloptions OptTableSpace\nOptNoLog ::= UNLOGGED |\nRefreshMatViewStmt ::= REFRESH MATERIALIZED VIEW opt_concurrently \nqualified_name opt_with_data\nCreateSeqStmt ::= CREATE OptTemp SEQUENCE qualified_name OptSeqOptList | \nCREATE OptTemp SEQUENCE IF_P NOT EXISTS qualified_name OptSeqOptList\nAlterSeqStmt ::= ALTER SEQUENCE qualified_name SeqOptList | ALTER \nSEQUENCE IF_P EXISTS qualified_name SeqOptList\nOptSeqOptList ::= SeqOptList |\nOptParenthesizedSeqOptList ::= '(' SeqOptList ')' |\nSeqOptList ::= SeqOptElem | SeqOptList SeqOptElem\nSeqOptElem ::= AS SimpleTypename | CACHE NumericOnly | CYCLE | NO CYCLE \n| INCREMENT opt_by NumericOnly | MAXVALUE NumericOnly | MINVALUE \nNumericOnly | NO MAXVALUE | NO MINVALUE | OWNED BY any_name | SEQUENCE \nNAME_P any_name | START opt_with NumericOnly | RESTART | RESTART \nopt_with NumericOnly\nopt_by ::= BY |\nNumericOnly ::= FCONST | '+' FCONST | '-' FCONST | SignedIconst\nNumericOnly_list ::= NumericOnly | NumericOnly_list ',' NumericOnly\nCreatePLangStmt ::= CREATE opt_or_replace opt_trusted opt_procedural \nLANGUAGE NonReservedWord_or_Sconst | CREATE opt_or_replace opt_trusted \nopt_procedural LANGUAGE NonReservedWord_or_Sconst HANDLER handler_name \nopt_inline_handler opt_validator\nopt_trusted ::= TRUSTED |\nhandler_name ::= name | name attrs\nopt_inline_handler ::= INLINE_P handler_name |\nvalidator_clause ::= VALIDATOR handler_name | NO VALIDATOR\nopt_validator ::= validator_clause |\nDropPLangStmt ::= DROP opt_procedural LANGUAGE NonReservedWord_or_Sconst \nopt_drop_behavior | DROP opt_procedural LANGUAGE IF_P EXISTS \nNonReservedWord_or_Sconst opt_drop_behavior\nopt_procedural ::= PROCEDURAL |\nCreateTableSpaceStmt ::= CREATE TABLESPACE name OptTableSpaceOwner \nLOCATION Sconst opt_reloptions\nOptTableSpaceOwner ::= OWNER RoleSpec |\nDropTableSpaceStmt ::= DROP TABLESPACE 
name | DROP TABLESPACE IF_P \nEXISTS name\nCreateExtensionStmt ::= CREATE EXTENSION name opt_with \ncreate_extension_opt_list | CREATE EXTENSION IF_P NOT EXISTS name \nopt_with create_extension_opt_list\ncreate_extension_opt_list ::= create_extension_opt_list \ncreate_extension_opt_item |\ncreate_extension_opt_item ::= SCHEMA name | VERSION_P \nNonReservedWord_or_Sconst | FROM NonReservedWord_or_Sconst | CASCADE\nAlterExtensionStmt ::= ALTER EXTENSION name UPDATE alter_extension_opt_list\nalter_extension_opt_list ::= alter_extension_opt_list \nalter_extension_opt_item |\nalter_extension_opt_item ::= TO NonReservedWord_or_Sconst\nAlterExtensionContentsStmt ::= ALTER EXTENSION name add_drop ACCESS \nMETHOD name | ALTER EXTENSION name add_drop AGGREGATE \naggregate_with_argtypes | ALTER EXTENSION name add_drop CAST '(' \nTypename AS Typename ')' | ALTER EXTENSION name add_drop COLLATION \nany_name | ALTER EXTENSION name add_drop CONVERSION_P any_name | ALTER \nEXTENSION name add_drop DOMAIN_P Typename | ALTER EXTENSION name \nadd_drop FUNCTION function_with_argtypes | ALTER EXTENSION name add_drop \nopt_procedural LANGUAGE name | ALTER EXTENSION name add_drop OPERATOR \noperator_with_argtypes | ALTER EXTENSION name add_drop OPERATOR CLASS \nany_name USING access_method | ALTER EXTENSION name add_drop OPERATOR \nFAMILY any_name USING access_method | ALTER EXTENSION name add_drop \nPROCEDURE function_with_argtypes | ALTER EXTENSION name add_drop ROUTINE \nfunction_with_argtypes | ALTER EXTENSION name add_drop SCHEMA name | \nALTER EXTENSION name add_drop EVENT TRIGGER name | ALTER EXTENSION name \nadd_drop TABLE any_name | ALTER EXTENSION name add_drop TEXT_P SEARCH \nPARSER any_name | ALTER EXTENSION name add_drop TEXT_P SEARCH DICTIONARY \nany_name | ALTER EXTENSION name add_drop TEXT_P SEARCH TEMPLATE any_name \n| ALTER EXTENSION name add_drop TEXT_P SEARCH CONFIGURATION any_name | \nALTER EXTENSION name add_drop SEQUENCE any_name | ALTER EXTENSION name \nadd_drop 
VIEW any_name | ALTER EXTENSION name add_drop MATERIALIZED VIEW \nany_name | ALTER EXTENSION name add_drop FOREIGN TABLE any_name | ALTER \nEXTENSION name add_drop FOREIGN DATA_P WRAPPER name | ALTER EXTENSION \nname add_drop SERVER name | ALTER EXTENSION name add_drop TRANSFORM FOR \nTypename LANGUAGE name | ALTER EXTENSION name add_drop TYPE_P Typename\nCreateFdwStmt ::= CREATE FOREIGN DATA_P WRAPPER name opt_fdw_options \ncreate_generic_options\nfdw_option ::= HANDLER handler_name | NO HANDLER | VALIDATOR \nhandler_name | NO VALIDATOR\nfdw_options ::= fdw_option | fdw_options fdw_option\nopt_fdw_options ::= fdw_options |\nAlterFdwStmt ::= ALTER FOREIGN DATA_P WRAPPER name opt_fdw_options \nalter_generic_options | ALTER FOREIGN DATA_P WRAPPER name fdw_options\ncreate_generic_options ::= OPTIONS '(' generic_option_list ')' |\ngeneric_option_list ::= generic_option_elem | generic_option_list ',' \ngeneric_option_elem\nalter_generic_options ::= OPTIONS '(' alter_generic_option_list ')'\nalter_generic_option_list ::= alter_generic_option_elem | \nalter_generic_option_list ',' alter_generic_option_elem\nalter_generic_option_elem ::= generic_option_elem | SET \ngeneric_option_elem | ADD_P generic_option_elem | DROP generic_option_name\ngeneric_option_elem ::= generic_option_name generic_option_arg\ngeneric_option_name ::= ColLabel\ngeneric_option_arg ::= Sconst\nCreateForeignServerStmt ::= CREATE SERVER name opt_type \nopt_foreign_server_version FOREIGN DATA_P WRAPPER name \ncreate_generic_options | CREATE SERVER IF_P NOT EXISTS name opt_type \nopt_foreign_server_version FOREIGN DATA_P WRAPPER name \ncreate_generic_options\nopt_type ::= TYPE_P Sconst |\nforeign_server_version ::= VERSION_P Sconst | VERSION_P NULL_P\nopt_foreign_server_version ::= foreign_server_version |\nAlterForeignServerStmt ::= ALTER SERVER name foreign_server_version \nalter_generic_options | ALTER SERVER name foreign_server_version | ALTER \nSERVER name 
alter_generic_options\nCreateForeignTableStmt ::= CREATE FOREIGN TABLE qualified_name '(' \nOptTableElementList ')' OptInherit SERVER name create_generic_options | \nCREATE FOREIGN TABLE IF_P NOT EXISTS qualified_name '(' \nOptTableElementList ')' OptInherit SERVER name create_generic_options | \nCREATE FOREIGN TABLE qualified_name PARTITION OF qualified_name \nOptTypedTableElementList PartitionBoundSpec SERVER name \ncreate_generic_options | CREATE FOREIGN TABLE IF_P NOT EXISTS \nqualified_name PARTITION OF qualified_name OptTypedTableElementList \nPartitionBoundSpec SERVER name create_generic_options\nAlterForeignTableStmt ::= ALTER FOREIGN TABLE relation_expr \nalter_table_cmds | ALTER FOREIGN TABLE IF_P EXISTS relation_expr \nalter_table_cmds\nImportForeignSchemaStmt ::= IMPORT_P FOREIGN SCHEMA name \nimport_qualification FROM SERVER name INTO name create_generic_options\nimport_qualification_type ::= LIMIT TO | EXCEPT\nimport_qualification ::= import_qualification_type '(' \nrelation_expr_list ')' |\nCreateUserMappingStmt ::= CREATE USER MAPPING FOR auth_ident SERVER name \ncreate_generic_options | CREATE USER MAPPING IF_P NOT EXISTS FOR \nauth_ident SERVER name create_generic_options\nauth_ident ::= RoleSpec | USER\nDropUserMappingStmt ::= DROP USER MAPPING FOR auth_ident SERVER name | \nDROP USER MAPPING IF_P EXISTS FOR auth_ident SERVER name\nAlterUserMappingStmt ::= ALTER USER MAPPING FOR auth_ident SERVER name \nalter_generic_options\nCreatePolicyStmt ::= CREATE POLICY name ON qualified_name \nRowSecurityDefaultPermissive RowSecurityDefaultForCmd \nRowSecurityDefaultToRole RowSecurityOptionalExpr \nRowSecurityOptionalWithCheck\nAlterPolicyStmt ::= ALTER POLICY name ON qualified_name \nRowSecurityOptionalToRole RowSecurityOptionalExpr \nRowSecurityOptionalWithCheck\nRowSecurityOptionalExpr ::= USING '(' a_expr ')' |\nRowSecurityOptionalWithCheck ::= WITH CHECK '(' a_expr ')' |\nRowSecurityDefaultToRole ::= TO role_list |\nRowSecurityOptionalToRole ::= TO 
role_list |\nRowSecurityDefaultPermissive ::= AS IDENT |\nRowSecurityDefaultForCmd ::= FOR row_security_cmd |\nrow_security_cmd ::= ALL | SELECT | INSERT | UPDATE | DELETE_P\nCreateAmStmt ::= CREATE ACCESS METHOD name TYPE_P am_type HANDLER \nhandler_name\nam_type ::= INDEX | TABLE\nCreateTrigStmt ::= CREATE TRIGGER name TriggerActionTime TriggerEvents \nON qualified_name TriggerReferencing TriggerForSpec TriggerWhen EXECUTE \nFUNCTION_or_PROCEDURE func_name '(' TriggerFuncArgs ')' | CREATE \nCONSTRAINT TRIGGER name AFTER TriggerEvents ON qualified_name \nOptConstrFromTable ConstraintAttributeSpec FOR EACH ROW TriggerWhen \nEXECUTE FUNCTION_or_PROCEDURE func_name '(' TriggerFuncArgs ')'\nTriggerActionTime ::= BEFORE | AFTER | INSTEAD OF\nTriggerEvents ::= TriggerOneEvent | TriggerEvents OR TriggerOneEvent\nTriggerOneEvent ::= INSERT | DELETE_P | UPDATE | UPDATE OF columnList | \nTRUNCATE\nTriggerReferencing ::= REFERENCING TriggerTransitions |\nTriggerTransitions ::= TriggerTransition | TriggerTransitions \nTriggerTransition\nTriggerTransition ::= TransitionOldOrNew TransitionRowOrTable opt_as \nTransitionRelName\nTransitionOldOrNew ::= NEW | OLD\nTransitionRowOrTable ::= TABLE | ROW\nTransitionRelName ::= ColId\nTriggerForSpec ::= FOR TriggerForOptEach TriggerForType |\nTriggerForOptEach ::= EACH |\nTriggerForType ::= ROW | STATEMENT\nTriggerWhen ::= WHEN '(' a_expr ')' |\nFUNCTION_or_PROCEDURE ::= FUNCTION | PROCEDURE\nTriggerFuncArgs ::= TriggerFuncArg | TriggerFuncArgs ',' TriggerFuncArg |\nTriggerFuncArg ::= Iconst | FCONST | Sconst | ColLabel\nOptConstrFromTable ::= FROM qualified_name |\nConstraintAttributeSpec ::= | ConstraintAttributeSpec \nConstraintAttributeElem\nConstraintAttributeElem ::= NOT DEFERRABLE | DEFERRABLE | INITIALLY \nIMMEDIATE | INITIALLY DEFERRED | NOT VALID | NO INHERIT\nCreateEventTrigStmt ::= CREATE EVENT TRIGGER name ON ColLabel EXECUTE \nFUNCTION_or_PROCEDURE func_name '(' ')' | CREATE EVENT TRIGGER name ON \nColLabel WHEN 
event_trigger_when_list EXECUTE FUNCTION_or_PROCEDURE \nfunc_name '(' ')'\nevent_trigger_when_list ::= event_trigger_when_item | \nevent_trigger_when_list AND event_trigger_when_item\nevent_trigger_when_item ::= ColId IN_P '(' event_trigger_value_list ')'\nevent_trigger_value_list ::= SCONST | event_trigger_value_list ',' SCONST\nAlterEventTrigStmt ::= ALTER EVENT TRIGGER name enable_trigger\nenable_trigger ::= ENABLE_P | ENABLE_P REPLICA | ENABLE_P ALWAYS | DISABLE_P\nCreateAssertionStmt ::= CREATE ASSERTION any_name CHECK '(' a_expr ')' \nConstraintAttributeSpec\nDefineStmt ::= CREATE opt_or_replace AGGREGATE func_name aggr_args \ndefinition | CREATE opt_or_replace AGGREGATE func_name \nold_aggr_definition | CREATE OPERATOR any_operator definition | CREATE \nTYPE_P any_name definition | CREATE TYPE_P any_name | CREATE TYPE_P \nany_name AS '(' OptTableFuncElementList ')' | CREATE TYPE_P any_name AS \nENUM_P '(' opt_enum_val_list ')' | CREATE TYPE_P any_name AS RANGE \ndefinition | CREATE TEXT_P SEARCH PARSER any_name definition | CREATE \nTEXT_P SEARCH DICTIONARY any_name definition | CREATE TEXT_P SEARCH \nTEMPLATE any_name definition | CREATE TEXT_P SEARCH CONFIGURATION \nany_name definition | CREATE COLLATION any_name definition | CREATE \nCOLLATION IF_P NOT EXISTS any_name definition | CREATE COLLATION \nany_name FROM any_name | CREATE COLLATION IF_P NOT EXISTS any_name FROM \nany_name\ndefinition ::= '(' def_list ')'\ndef_list ::= def_elem | def_list ',' def_elem\ndef_elem ::= ColLabel '=' def_arg | ColLabel\ndef_arg ::= func_type | reserved_keyword | qual_all_Op | NumericOnly | \nSconst | NONE\nold_aggr_definition ::= '(' old_aggr_list ')'\nold_aggr_list ::= old_aggr_elem | old_aggr_list ',' old_aggr_elem\nold_aggr_elem ::= IDENT '=' def_arg\nopt_enum_val_list ::= enum_val_list |\nenum_val_list ::= Sconst | enum_val_list ',' Sconst\nAlterEnumStmt ::= ALTER TYPE_P any_name ADD_P VALUE_P opt_if_not_exists \nSconst | ALTER TYPE_P any_name ADD_P VALUE_P 
opt_if_not_exists Sconst \nBEFORE Sconst | ALTER TYPE_P any_name ADD_P VALUE_P opt_if_not_exists \nSconst AFTER Sconst | ALTER TYPE_P any_name RENAME VALUE_P Sconst TO Sconst\nopt_if_not_exists ::= IF_P NOT EXISTS |\nCreateOpClassStmt ::= CREATE OPERATOR CLASS any_name opt_default FOR \nTYPE_P Typename USING access_method opt_opfamily AS opclass_item_list\nopclass_item_list ::= opclass_item | opclass_item_list ',' opclass_item\nopclass_item ::= OPERATOR Iconst any_operator opclass_purpose \nopt_recheck | OPERATOR Iconst operator_with_argtypes opclass_purpose \nopt_recheck | FUNCTION Iconst function_with_argtypes | FUNCTION Iconst \n'(' type_list ')' function_with_argtypes | STORAGE Typename\nopt_default ::= DEFAULT |\nopt_opfamily ::= FAMILY any_name |\nopclass_purpose ::= FOR SEARCH | FOR ORDER BY any_name |\nopt_recheck ::= RECHECK |\nCreateOpFamilyStmt ::= CREATE OPERATOR FAMILY any_name USING access_method\nAlterOpFamilyStmt ::= ALTER OPERATOR FAMILY any_name USING access_method \nADD_P opclass_item_list | ALTER OPERATOR FAMILY any_name USING \naccess_method DROP opclass_drop_list\nopclass_drop_list ::= opclass_drop | opclass_drop_list ',' opclass_drop\nopclass_drop ::= OPERATOR Iconst '(' type_list ')' | FUNCTION Iconst '(' \ntype_list ')'\nDropOpClassStmt ::= DROP OPERATOR CLASS any_name USING access_method \nopt_drop_behavior | DROP OPERATOR CLASS IF_P EXISTS any_name USING \naccess_method opt_drop_behavior\nDropOpFamilyStmt ::= DROP OPERATOR FAMILY any_name USING access_method \nopt_drop_behavior | DROP OPERATOR FAMILY IF_P EXISTS any_name USING \naccess_method opt_drop_behavior\nDropOwnedStmt ::= DROP OWNED BY role_list opt_drop_behavior\nReassignOwnedStmt ::= REASSIGN OWNED BY role_list TO RoleSpec\nDropStmt ::= DROP drop_type_any_name IF_P EXISTS any_name_list \nopt_drop_behavior | DROP drop_type_any_name any_name_list \nopt_drop_behavior | DROP drop_type_name IF_P EXISTS name_list \nopt_drop_behavior | DROP drop_type_name name_list opt_drop_behavior | 
\nDROP drop_type_name_on_any_name name ON any_name opt_drop_behavior | \nDROP drop_type_name_on_any_name IF_P EXISTS name ON any_name \nopt_drop_behavior | DROP TYPE_P type_name_list opt_drop_behavior | DROP \nTYPE_P IF_P EXISTS type_name_list opt_drop_behavior | DROP DOMAIN_P \ntype_name_list opt_drop_behavior | DROP DOMAIN_P IF_P EXISTS \ntype_name_list opt_drop_behavior | DROP INDEX CONCURRENTLY any_name_list \nopt_drop_behavior | DROP INDEX CONCURRENTLY IF_P EXISTS any_name_list \nopt_drop_behavior\ndrop_type_any_name ::= TABLE | SEQUENCE | VIEW | MATERIALIZED VIEW | \nINDEX | FOREIGN TABLE | COLLATION | CONVERSION_P | STATISTICS | TEXT_P \nSEARCH PARSER | TEXT_P SEARCH DICTIONARY | TEXT_P SEARCH TEMPLATE | \nTEXT_P SEARCH CONFIGURATION\ndrop_type_name ::= ACCESS METHOD | EVENT TRIGGER | EXTENSION | FOREIGN \nDATA_P WRAPPER | PUBLICATION | SCHEMA | SERVER\ndrop_type_name_on_any_name ::= POLICY | RULE | TRIGGER\nany_name_list ::= any_name | any_name_list ',' any_name\nany_name ::= ColId | ColId attrs\nattrs ::= '.' attr_name | attrs '.' 
attr_name\ntype_name_list ::= Typename | type_name_list ',' Typename\nTruncateStmt ::= TRUNCATE opt_table relation_expr_list opt_restart_seqs \nopt_drop_behavior\nopt_restart_seqs ::= CONTINUE_P IDENTITY_P | RESTART IDENTITY_P |\nCommentStmt ::= COMMENT ON comment_type_any_name any_name IS \ncomment_text | COMMENT ON comment_type_name name IS comment_text | \nCOMMENT ON TYPE_P Typename IS comment_text | COMMENT ON DOMAIN_P \nTypename IS comment_text | COMMENT ON AGGREGATE aggregate_with_argtypes \nIS comment_text | COMMENT ON FUNCTION function_with_argtypes IS \ncomment_text | COMMENT ON OPERATOR operator_with_argtypes IS \ncomment_text | COMMENT ON CONSTRAINT name ON any_name IS comment_text | \nCOMMENT ON CONSTRAINT name ON DOMAIN_P any_name IS comment_text | \nCOMMENT ON POLICY name ON any_name IS comment_text | COMMENT ON \nPROCEDURE function_with_argtypes IS comment_text | COMMENT ON ROUTINE \nfunction_with_argtypes IS comment_text | COMMENT ON RULE name ON \nany_name IS comment_text | COMMENT ON TRANSFORM FOR Typename LANGUAGE \nname IS comment_text | COMMENT ON TRIGGER name ON any_name IS \ncomment_text | COMMENT ON OPERATOR CLASS any_name USING access_method IS \ncomment_text | COMMENT ON OPERATOR FAMILY any_name USING access_method \nIS comment_text | COMMENT ON LARGE_P OBJECT_P NumericOnly IS \ncomment_text | COMMENT ON CAST '(' Typename AS Typename ')' IS comment_text\ncomment_type_any_name ::= COLUMN | INDEX | SEQUENCE | STATISTICS | TABLE \n| VIEW | MATERIALIZED VIEW | COLLATION | CONVERSION_P | FOREIGN TABLE | \nTEXT_P SEARCH CONFIGURATION | TEXT_P SEARCH DICTIONARY | TEXT_P SEARCH \nPARSER | TEXT_P SEARCH TEMPLATE\ncomment_type_name ::= ACCESS METHOD | DATABASE | EVENT TRIGGER | \nEXTENSION | FOREIGN DATA_P WRAPPER | opt_procedural LANGUAGE | \nPUBLICATION | ROLE | SCHEMA | SERVER | SUBSCRIPTION | TABLESPACE\ncomment_text ::= Sconst | NULL_P\nSecLabelStmt ::= SECURITY LABEL opt_provider ON \nsecurity_label_type_any_name any_name IS security_label | 
SECURITY LABEL \nopt_provider ON security_label_type_name name IS security_label | \nSECURITY LABEL opt_provider ON TYPE_P Typename IS security_label | \nSECURITY LABEL opt_provider ON DOMAIN_P Typename IS security_label | \nSECURITY LABEL opt_provider ON AGGREGATE aggregate_with_argtypes IS \nsecurity_label | SECURITY LABEL opt_provider ON FUNCTION \nfunction_with_argtypes IS security_label | SECURITY LABEL opt_provider \nON LARGE_P OBJECT_P NumericOnly IS security_label | SECURITY LABEL \nopt_provider ON PROCEDURE function_with_argtypes IS security_label | \nSECURITY LABEL opt_provider ON ROUTINE function_with_argtypes IS \nsecurity_label\nopt_provider ::= FOR NonReservedWord_or_Sconst |\nsecurity_label_type_any_name ::= COLUMN | FOREIGN TABLE | SEQUENCE | \nTABLE | VIEW | MATERIALIZED VIEW\nsecurity_label_type_name ::= DATABASE | EVENT TRIGGER | opt_procedural \nLANGUAGE | PUBLICATION | ROLE | SCHEMA | SUBSCRIPTION | TABLESPACE\nsecurity_label ::= Sconst | NULL_P\nFetchStmt ::= FETCH fetch_args | MOVE fetch_args\nfetch_args ::= cursor_name | from_in cursor_name | NEXT opt_from_in \ncursor_name | PRIOR opt_from_in cursor_name | FIRST_P opt_from_in \ncursor_name | LAST_P opt_from_in cursor_name | ABSOLUTE_P SignedIconst \nopt_from_in cursor_name | RELATIVE_P SignedIconst opt_from_in \ncursor_name | SignedIconst opt_from_in cursor_name | ALL opt_from_in \ncursor_name | FORWARD opt_from_in cursor_name | FORWARD SignedIconst \nopt_from_in cursor_name | FORWARD ALL opt_from_in cursor_name | BACKWARD \nopt_from_in cursor_name | BACKWARD SignedIconst opt_from_in cursor_name \n| BACKWARD ALL opt_from_in cursor_name\nfrom_in ::= FROM | IN_P\nopt_from_in ::= from_in |\nGrantStmt ::= GRANT privileges ON privilege_target TO grantee_list \nopt_grant_grant_option\nRevokeStmt ::= REVOKE privileges ON privilege_target FROM grantee_list \nopt_drop_behavior | REVOKE GRANT OPTION FOR privileges ON \nprivilege_target FROM grantee_list opt_drop_behavior\nprivileges ::= privilege_list 
| ALL | ALL PRIVILEGES | ALL '(' \ncolumnList ')' | ALL PRIVILEGES '(' columnList ')'\nprivilege_list ::= privilege | privilege_list ',' privilege\nprivilege ::= SELECT opt_column_list | REFERENCES opt_column_list | \nCREATE opt_column_list | ColId opt_column_list\nprivilege_target ::= qualified_name_list | TABLE qualified_name_list | \nSEQUENCE qualified_name_list | FOREIGN DATA_P WRAPPER name_list | \nFOREIGN SERVER name_list | FUNCTION function_with_argtypes_list | \nPROCEDURE function_with_argtypes_list | ROUTINE \nfunction_with_argtypes_list | DATABASE name_list | DOMAIN_P \nany_name_list | LANGUAGE name_list | LARGE_P OBJECT_P NumericOnly_list | \nSCHEMA name_list | TABLESPACE name_list | TYPE_P any_name_list | ALL \nTABLES IN_P SCHEMA name_list | ALL SEQUENCES IN_P SCHEMA name_list | ALL \nFUNCTIONS IN_P SCHEMA name_list | ALL PROCEDURES IN_P SCHEMA name_list | \nALL ROUTINES IN_P SCHEMA name_list\ngrantee_list ::= grantee | grantee_list ',' grantee\ngrantee ::= RoleSpec | GROUP_P RoleSpec\nopt_grant_grant_option ::= WITH GRANT OPTION |\nGrantRoleStmt ::= GRANT privilege_list TO role_list \nopt_grant_admin_option opt_granted_by\nRevokeRoleStmt ::= REVOKE privilege_list FROM role_list opt_granted_by \nopt_drop_behavior | REVOKE ADMIN OPTION FOR privilege_list FROM \nrole_list opt_granted_by opt_drop_behavior\nopt_grant_admin_option ::= WITH ADMIN OPTION |\nopt_granted_by ::= GRANTED BY RoleSpec |\nAlterDefaultPrivilegesStmt ::= ALTER DEFAULT PRIVILEGES DefACLOptionList \nDefACLAction\nDefACLOptionList ::= DefACLOptionList DefACLOption |\nDefACLOption ::= IN_P SCHEMA name_list | FOR ROLE role_list | FOR USER \nrole_list\nDefACLAction ::= GRANT privileges ON defacl_privilege_target TO \ngrantee_list opt_grant_grant_option | REVOKE privileges ON \ndefacl_privilege_target FROM grantee_list opt_drop_behavior | REVOKE \nGRANT OPTION FOR privileges ON defacl_privilege_target FROM grantee_list \nopt_drop_behavior\ndefacl_privilege_target ::= TABLES | FUNCTIONS | 
ROUTINES | SEQUENCES | \nTYPES_P | SCHEMAS\nIndexStmt ::= CREATE opt_unique INDEX opt_concurrently opt_index_name ON \nrelation_expr access_method_clause '(' index_params ')' opt_include \nopt_reloptions OptTableSpace where_clause | CREATE opt_unique INDEX \nopt_concurrently IF_P NOT EXISTS index_name ON relation_expr \naccess_method_clause '(' index_params ')' opt_include opt_reloptions \nOptTableSpace where_clause\nopt_unique ::= UNIQUE |\nopt_concurrently ::= CONCURRENTLY |\nopt_index_name ::= index_name |\naccess_method_clause ::= USING access_method |\nindex_params ::= index_elem | index_params ',' index_elem\nindex_elem_options ::= opt_collate opt_class opt_asc_desc \nopt_nulls_order | opt_collate any_name reloptions opt_asc_desc \nopt_nulls_order\nindex_elem ::= ColId index_elem_options | func_expr_windowless \nindex_elem_options | '(' a_expr ')' index_elem_options\nopt_include ::= INCLUDE '(' index_including_params ')' |\nindex_including_params ::= index_elem | index_including_params ',' \nindex_elem\nopt_collate ::= COLLATE any_name |\nopt_class ::= any_name |\nopt_asc_desc ::= ASC | DESC |\nopt_nulls_order ::= NULLS_LA FIRST_P | NULLS_LA LAST_P |\nCreateFunctionStmt ::= CREATE opt_or_replace FUNCTION func_name \nfunc_args_with_defaults RETURNS func_return createfunc_opt_list | CREATE \nopt_or_replace FUNCTION func_name func_args_with_defaults RETURNS TABLE \n'(' table_func_column_list ')' createfunc_opt_list | CREATE \nopt_or_replace FUNCTION func_name func_args_with_defaults \ncreatefunc_opt_list | CREATE opt_or_replace PROCEDURE func_name \nfunc_args_with_defaults createfunc_opt_list\nopt_or_replace ::= OR REPLACE |\nfunc_args ::= '(' func_args_list ')' | '(' ')'\nfunc_args_list ::= func_arg | func_args_list ',' func_arg\nfunction_with_argtypes_list ::= function_with_argtypes | \nfunction_with_argtypes_list ',' function_with_argtypes\nfunction_with_argtypes ::= func_name func_args | type_func_name_keyword \n| ColId | ColId 
indirection\nfunc_args_with_defaults ::= '(' func_args_with_defaults_list ')' | '(' ')'\nfunc_args_with_defaults_list ::= func_arg_with_default | \nfunc_args_with_defaults_list ',' func_arg_with_default\nfunc_arg ::= arg_class param_name func_type | param_name arg_class \nfunc_type | param_name func_type | arg_class func_type | func_type\narg_class ::= IN_P | OUT_P | INOUT | IN_P OUT_P | VARIADIC\nparam_name ::= type_function_name\nfunc_return ::= func_type\nfunc_type ::= Typename | type_function_name attrs '%' TYPE_P | SETOF \ntype_function_name attrs '%' TYPE_P\nfunc_arg_with_default ::= func_arg | func_arg DEFAULT a_expr | func_arg \n'=' a_expr\naggr_arg ::= func_arg\naggr_args ::= '(' '*' ')' | '(' aggr_args_list ')' | '(' ORDER BY \naggr_args_list ')' | '(' aggr_args_list ORDER BY aggr_args_list ')'\naggr_args_list ::= aggr_arg | aggr_args_list ',' aggr_arg\naggregate_with_argtypes ::= func_name aggr_args\naggregate_with_argtypes_list ::= aggregate_with_argtypes | \naggregate_with_argtypes_list ',' aggregate_with_argtypes\ncreatefunc_opt_list ::= createfunc_opt_item | createfunc_opt_list \ncreatefunc_opt_item\ncommon_func_opt_item ::= CALLED ON NULL_P INPUT_P | RETURNS NULL_P ON \nNULL_P INPUT_P | STRICT_P | IMMUTABLE | STABLE | VOLATILE | EXTERNAL \nSECURITY DEFINER | EXTERNAL SECURITY INVOKER | SECURITY DEFINER | \nSECURITY INVOKER | LEAKPROOF | NOT LEAKPROOF | COST NumericOnly | ROWS \nNumericOnly | SUPPORT any_name | FunctionSetResetClause | PARALLEL ColId\ncreatefunc_opt_item ::= AS func_as | LANGUAGE NonReservedWord_or_Sconst \n| TRANSFORM transform_type_list | WINDOW | common_func_opt_item\nfunc_as ::= Sconst | Sconst ',' Sconst\ntransform_type_list ::= FOR TYPE_P Typename | transform_type_list ',' \nFOR TYPE_P Typename\nopt_definition ::= WITH definition |\ntable_func_column ::= param_name func_type\ntable_func_column_list ::= table_func_column | table_func_column_list \n',' table_func_column\nAlterFunctionStmt ::= ALTER FUNCTION function_with_argtypes 
\nalterfunc_opt_list opt_restrict | ALTER PROCEDURE function_with_argtypes \nalterfunc_opt_list opt_restrict | ALTER ROUTINE function_with_argtypes \nalterfunc_opt_list opt_restrict\nalterfunc_opt_list ::= common_func_opt_item | alterfunc_opt_list \ncommon_func_opt_item\nopt_restrict ::= RESTRICT | /*empty*/\nRemoveFuncStmt ::= DROP FUNCTION function_with_argtypes_list \nopt_drop_behavior | DROP FUNCTION IF_P EXISTS \nfunction_with_argtypes_list opt_drop_behavior | DROP PROCEDURE \nfunction_with_argtypes_list opt_drop_behavior | DROP PROCEDURE IF_P \nEXISTS function_with_argtypes_list opt_drop_behavior | DROP ROUTINE \nfunction_with_argtypes_list opt_drop_behavior | DROP ROUTINE IF_P EXISTS \nfunction_with_argtypes_list opt_drop_behavior\nRemoveAggrStmt ::= DROP AGGREGATE aggregate_with_argtypes_list \nopt_drop_behavior | DROP AGGREGATE IF_P EXISTS \naggregate_with_argtypes_list opt_drop_behavior\nRemoveOperStmt ::= DROP OPERATOR operator_with_argtypes_list \nopt_drop_behavior | DROP OPERATOR IF_P EXISTS \noperator_with_argtypes_list opt_drop_behavior\noper_argtypes ::= '(' Typename ')' | '(' Typename ',' Typename ')' | '(' \nNONE ',' Typename ')' | '(' Typename ',' NONE ')'\nany_operator ::= all_Op | ColId '.' 
any_operator\noperator_with_argtypes_list ::= operator_with_argtypes | \noperator_with_argtypes_list ',' operator_with_argtypes\noperator_with_argtypes ::= any_operator oper_argtypes\nDoStmt ::= DO dostmt_opt_list\ndostmt_opt_list ::= dostmt_opt_item | dostmt_opt_list dostmt_opt_item\ndostmt_opt_item ::= Sconst | LANGUAGE NonReservedWord_or_Sconst\nCreateCastStmt ::= CREATE CAST '(' Typename AS Typename ')' WITH \nFUNCTION function_with_argtypes cast_context | CREATE CAST '(' Typename \nAS Typename ')' WITHOUT FUNCTION cast_context | CREATE CAST '(' Typename \nAS Typename ')' WITH INOUT cast_context\ncast_context ::= AS IMPLICIT_P | AS ASSIGNMENT |\nDropCastStmt ::= DROP CAST opt_if_exists '(' Typename AS Typename ')' \nopt_drop_behavior\nopt_if_exists ::= IF_P EXISTS |\nCreateTransformStmt ::= CREATE opt_or_replace TRANSFORM FOR Typename \nLANGUAGE name '(' transform_element_list ')'\ntransform_element_list ::= FROM SQL_P WITH FUNCTION \nfunction_with_argtypes ',' TO SQL_P WITH FUNCTION function_with_argtypes \n| TO SQL_P WITH FUNCTION function_with_argtypes ',' FROM SQL_P WITH \nFUNCTION function_with_argtypes | FROM SQL_P WITH FUNCTION \nfunction_with_argtypes | TO SQL_P WITH FUNCTION function_with_argtypes\nDropTransformStmt ::= DROP TRANSFORM opt_if_exists FOR Typename LANGUAGE \nname opt_drop_behavior\nReindexStmt ::= REINDEX reindex_target_type opt_concurrently \nqualified_name | REINDEX reindex_target_multitable opt_concurrently name \n| REINDEX '(' reindex_option_list ')' reindex_target_type \nopt_concurrently qualified_name | REINDEX '(' reindex_option_list ')' \nreindex_target_multitable opt_concurrently name\nreindex_target_type ::= INDEX | TABLE\nreindex_target_multitable ::= SCHEMA | SYSTEM_P | DATABASE\nreindex_option_list ::= reindex_option_elem | reindex_option_list ',' \nreindex_option_elem\nreindex_option_elem ::= VERBOSE\nAlterTblSpcStmt ::= ALTER TABLESPACE name SET reloptions | ALTER \nTABLESPACE name RESET reloptions\nRenameStmt ::= ALTER 
AGGREGATE aggregate_with_argtypes RENAME TO name | \nALTER COLLATION any_name RENAME TO name | ALTER CONVERSION_P any_name \nRENAME TO name | ALTER DATABASE database_name RENAME TO database_name | \nALTER DOMAIN_P any_name RENAME TO name | ALTER DOMAIN_P any_name RENAME \nCONSTRAINT name TO name | ALTER FOREIGN DATA_P WRAPPER name RENAME TO \nname | ALTER FUNCTION function_with_argtypes RENAME TO name | ALTER \nGROUP_P RoleId RENAME TO RoleId | ALTER opt_procedural LANGUAGE name \nRENAME TO name | ALTER OPERATOR CLASS any_name USING access_method \nRENAME TO name | ALTER OPERATOR FAMILY any_name USING access_method \nRENAME TO name | ALTER POLICY name ON qualified_name RENAME TO name | \nALTER POLICY IF_P EXISTS name ON qualified_name RENAME TO name | ALTER \nPROCEDURE function_with_argtypes RENAME TO name | ALTER PUBLICATION name \nRENAME TO name | ALTER ROUTINE function_with_argtypes RENAME TO name | \nALTER SCHEMA name RENAME TO name | ALTER SERVER name RENAME TO name | \nALTER SUBSCRIPTION name RENAME TO name | ALTER TABLE relation_expr \nRENAME TO name | ALTER TABLE IF_P EXISTS relation_expr RENAME TO name | \nALTER SEQUENCE qualified_name RENAME TO name | ALTER SEQUENCE IF_P \nEXISTS qualified_name RENAME TO name | ALTER VIEW qualified_name RENAME \nTO name | ALTER VIEW IF_P EXISTS qualified_name RENAME TO name | ALTER \nMATERIALIZED VIEW qualified_name RENAME TO name | ALTER MATERIALIZED \nVIEW IF_P EXISTS qualified_name RENAME TO name | ALTER INDEX \nqualified_name RENAME TO name | ALTER INDEX IF_P EXISTS qualified_name \nRENAME TO name | ALTER FOREIGN TABLE relation_expr RENAME TO name | \nALTER FOREIGN TABLE IF_P EXISTS relation_expr RENAME TO name | ALTER \nTABLE relation_expr RENAME opt_column name TO name | ALTER TABLE IF_P \nEXISTS relation_expr RENAME opt_column name TO name | ALTER VIEW \nqualified_name RENAME opt_column name TO name | ALTER VIEW IF_P EXISTS \nqualified_name RENAME opt_column name TO name | ALTER MATERIALIZED VIEW \nqualified_name 
RENAME opt_column name TO name | ALTER MATERIALIZED VIEW \nIF_P EXISTS qualified_name RENAME opt_column name TO name | ALTER TABLE \nrelation_expr RENAME CONSTRAINT name TO name | ALTER TABLE IF_P EXISTS \nrelation_expr RENAME CONSTRAINT name TO name | ALTER FOREIGN TABLE \nrelation_expr RENAME opt_column name TO name | ALTER FOREIGN TABLE IF_P \nEXISTS relation_expr RENAME opt_column name TO name | ALTER RULE name ON \nqualified_name RENAME TO name | ALTER TRIGGER name ON qualified_name \nRENAME TO name | ALTER EVENT TRIGGER name RENAME TO name | ALTER ROLE \nRoleId RENAME TO RoleId | ALTER USER RoleId RENAME TO RoleId | ALTER \nTABLESPACE name RENAME TO name | ALTER STATISTICS any_name RENAME TO \nname | ALTER TEXT_P SEARCH PARSER any_name RENAME TO name | ALTER TEXT_P \nSEARCH DICTIONARY any_name RENAME TO name | ALTER TEXT_P SEARCH TEMPLATE \nany_name RENAME TO name | ALTER TEXT_P SEARCH CONFIGURATION any_name \nRENAME TO name | ALTER TYPE_P any_name RENAME TO name | ALTER TYPE_P \nany_name RENAME ATTRIBUTE name TO name opt_drop_behavior\nopt_column ::= COLUMN |\nopt_set_data ::= SET DATA_P |\nAlterObjectDependsStmt ::= ALTER FUNCTION function_with_argtypes opt_no \nDEPENDS ON EXTENSION name | ALTER PROCEDURE function_with_argtypes \nopt_no DEPENDS ON EXTENSION name | ALTER ROUTINE function_with_argtypes \nopt_no DEPENDS ON EXTENSION name | ALTER TRIGGER name ON qualified_name \nopt_no DEPENDS ON EXTENSION name | ALTER MATERIALIZED VIEW \nqualified_name opt_no DEPENDS ON EXTENSION name | ALTER INDEX \nqualified_name opt_no DEPENDS ON EXTENSION name\nopt_no ::= NO |\nAlterObjectSchemaStmt ::= ALTER AGGREGATE aggregate_with_argtypes SET \nSCHEMA name | ALTER COLLATION any_name SET SCHEMA name | ALTER \nCONVERSION_P any_name SET SCHEMA name | ALTER DOMAIN_P any_name SET \nSCHEMA name | ALTER EXTENSION name SET SCHEMA name | ALTER FUNCTION \nfunction_with_argtypes SET SCHEMA name | ALTER OPERATOR \noperator_with_argtypes SET SCHEMA name | ALTER OPERATOR CLASS 
any_name \nUSING access_method SET SCHEMA name | ALTER OPERATOR FAMILY any_name \nUSING access_method SET SCHEMA name | ALTER PROCEDURE \nfunction_with_argtypes SET SCHEMA name | ALTER ROUTINE \nfunction_with_argtypes SET SCHEMA name | ALTER TABLE relation_expr SET \nSCHEMA name | ALTER TABLE IF_P EXISTS relation_expr SET SCHEMA name | \nALTER STATISTICS any_name SET SCHEMA name | ALTER TEXT_P SEARCH PARSER \nany_name SET SCHEMA name | ALTER TEXT_P SEARCH DICTIONARY any_name SET \nSCHEMA name | ALTER TEXT_P SEARCH TEMPLATE any_name SET SCHEMA name | \nALTER TEXT_P SEARCH CONFIGURATION any_name SET SCHEMA name | ALTER \nSEQUENCE qualified_name SET SCHEMA name | ALTER SEQUENCE IF_P EXISTS \nqualified_name SET SCHEMA name | ALTER VIEW qualified_name SET SCHEMA \nname | ALTER VIEW IF_P EXISTS qualified_name SET SCHEMA name | ALTER \nMATERIALIZED VIEW qualified_name SET SCHEMA name | ALTER MATERIALIZED \nVIEW IF_P EXISTS qualified_name SET SCHEMA name | ALTER FOREIGN TABLE \nrelation_expr SET SCHEMA name | ALTER FOREIGN TABLE IF_P EXISTS \nrelation_expr SET SCHEMA name | ALTER TYPE_P any_name SET SCHEMA name\nAlterOperatorStmt ::= ALTER OPERATOR operator_with_argtypes SET '(' \noperator_def_list ')'\noperator_def_list ::= operator_def_elem | operator_def_list ',' \noperator_def_elem\noperator_def_elem ::= ColLabel '=' NONE | ColLabel '=' operator_def_arg\noperator_def_arg ::= func_type | reserved_keyword | qual_all_Op | \nNumericOnly | Sconst\nAlterTypeStmt ::= ALTER TYPE_P any_name SET '(' operator_def_list ')'\nAlterOwnerStmt ::= ALTER AGGREGATE aggregate_with_argtypes OWNER TO \nRoleSpec | ALTER COLLATION any_name OWNER TO RoleSpec | ALTER \nCONVERSION_P any_name OWNER TO RoleSpec | ALTER DATABASE database_name \nOWNER TO RoleSpec | ALTER DOMAIN_P any_name OWNER TO RoleSpec | ALTER \nFUNCTION function_with_argtypes OWNER TO RoleSpec | ALTER opt_procedural \nLANGUAGE name OWNER TO RoleSpec | ALTER LARGE_P OBJECT_P NumericOnly \nOWNER TO RoleSpec | ALTER OPERATOR 
operator_with_argtypes OWNER TO \nRoleSpec | ALTER OPERATOR CLASS any_name USING access_method OWNER TO \nRoleSpec | ALTER OPERATOR FAMILY any_name USING access_method OWNER TO \nRoleSpec | ALTER PROCEDURE function_with_argtypes OWNER TO RoleSpec | \nALTER ROUTINE function_with_argtypes OWNER TO RoleSpec | ALTER SCHEMA \nname OWNER TO RoleSpec | ALTER TYPE_P any_name OWNER TO RoleSpec | ALTER \nTABLESPACE name OWNER TO RoleSpec | ALTER STATISTICS any_name OWNER TO \nRoleSpec | ALTER TEXT_P SEARCH DICTIONARY any_name OWNER TO RoleSpec | \nALTER TEXT_P SEARCH CONFIGURATION any_name OWNER TO RoleSpec | ALTER \nFOREIGN DATA_P WRAPPER name OWNER TO RoleSpec | ALTER SERVER name OWNER \nTO RoleSpec | ALTER EVENT TRIGGER name OWNER TO RoleSpec | ALTER \nPUBLICATION name OWNER TO RoleSpec | ALTER SUBSCRIPTION name OWNER TO \nRoleSpec\nCreatePublicationStmt ::= CREATE PUBLICATION name \nopt_publication_for_tables opt_definition\nopt_publication_for_tables ::= publication_for_tables |\npublication_for_tables ::= FOR TABLE relation_expr_list | FOR ALL TABLES\nAlterPublicationStmt ::= ALTER PUBLICATION name SET definition | ALTER \nPUBLICATION name ADD_P TABLE relation_expr_list | ALTER PUBLICATION name \nSET TABLE relation_expr_list | ALTER PUBLICATION name DROP TABLE \nrelation_expr_list\nCreateSubscriptionStmt ::= CREATE SUBSCRIPTION name CONNECTION Sconst \nPUBLICATION publication_name_list opt_definition\npublication_name_list ::= publication_name_item | publication_name_list \n',' publication_name_item\npublication_name_item ::= ColLabel\nAlterSubscriptionStmt ::= ALTER SUBSCRIPTION name SET definition | ALTER \nSUBSCRIPTION name CONNECTION Sconst | ALTER SUBSCRIPTION name REFRESH \nPUBLICATION opt_definition | ALTER SUBSCRIPTION name SET PUBLICATION \npublication_name_list opt_definition | ALTER SUBSCRIPTION name ENABLE_P \n| ALTER SUBSCRIPTION name DISABLE_P\nDropSubscriptionStmt ::= DROP SUBSCRIPTION name opt_drop_behavior | DROP \nSUBSCRIPTION IF_P EXISTS name 
opt_drop_behavior\nRuleStmt ::= CREATE opt_or_replace RULE name AS ON event TO \nqualified_name where_clause DO opt_instead RuleActionList\nRuleActionList ::= NOTHING | RuleActionStmt | '(' RuleActionMulti ')'\nRuleActionMulti ::= RuleActionMulti ';' RuleActionStmtOrEmpty | \nRuleActionStmtOrEmpty\nRuleActionStmt ::= SelectStmt | InsertStmt | UpdateStmt | DeleteStmt | \nNotifyStmt\nRuleActionStmtOrEmpty ::= RuleActionStmt |\nevent ::= SELECT | UPDATE | DELETE_P | INSERT\nopt_instead ::= INSTEAD | ALSO |\nNotifyStmt ::= NOTIFY ColId notify_payload\nnotify_payload ::= ',' Sconst |\nListenStmt ::= LISTEN ColId\nUnlistenStmt ::= UNLISTEN ColId | UNLISTEN '*'\nTransactionStmt ::= ABORT_P opt_transaction opt_transaction_chain | \nBEGIN_P opt_transaction transaction_mode_list_or_empty | START \nTRANSACTION transaction_mode_list_or_empty | COMMIT opt_transaction \nopt_transaction_chain | END_P opt_transaction opt_transaction_chain | \nROLLBACK opt_transaction opt_transaction_chain | SAVEPOINT ColId | \nRELEASE SAVEPOINT ColId | RELEASE ColId | ROLLBACK opt_transaction TO \nSAVEPOINT ColId | ROLLBACK opt_transaction TO ColId | PREPARE \nTRANSACTION Sconst | COMMIT PREPARED Sconst | ROLLBACK PREPARED Sconst\nopt_transaction ::= WORK | TRANSACTION |\ntransaction_mode_item ::= ISOLATION LEVEL iso_level | READ ONLY | READ \nWRITE | DEFERRABLE | NOT DEFERRABLE\ntransaction_mode_list ::= transaction_mode_item | transaction_mode_list \n',' transaction_mode_item | transaction_mode_list transaction_mode_item\ntransaction_mode_list_or_empty ::= transaction_mode_list |\nopt_transaction_chain ::= AND CHAIN | AND NO CHAIN |\nViewStmt ::= CREATE OptTemp VIEW qualified_name opt_column_list \nopt_reloptions AS SelectStmt opt_check_option | CREATE OR REPLACE \nOptTemp VIEW qualified_name opt_column_list opt_reloptions AS SelectStmt \nopt_check_option | CREATE OptTemp RECURSIVE VIEW qualified_name '(' \ncolumnList ')' opt_reloptions AS SelectStmt opt_check_option | CREATE OR \nREPLACE 
OptTemp RECURSIVE VIEW qualified_name '(' columnList ')' \nopt_reloptions AS SelectStmt opt_check_option\nopt_check_option ::= WITH CHECK OPTION | WITH CASCADED CHECK OPTION | \nWITH LOCAL CHECK OPTION |\nLoadStmt ::= LOAD file_name\nCreatedbStmt ::= CREATE DATABASE database_name opt_with createdb_opt_list\ncreatedb_opt_list ::= createdb_opt_items |\ncreatedb_opt_items ::= createdb_opt_item | createdb_opt_items \ncreatedb_opt_item\ncreatedb_opt_item ::= createdb_opt_name opt_equal SignedIconst | \ncreatedb_opt_name opt_equal opt_boolean_or_string | createdb_opt_name \nopt_equal DEFAULT\ncreatedb_opt_name ::= IDENT | CONNECTION LIMIT | ENCODING | LOCATION | \nOWNER | TABLESPACE | TEMPLATE\nopt_equal ::= '=' |\nAlterDatabaseStmt ::= ALTER DATABASE database_name WITH \ncreatedb_opt_list | ALTER DATABASE database_name createdb_opt_list | \nALTER DATABASE database_name SET TABLESPACE name\nAlterDatabaseSetStmt ::= ALTER DATABASE database_name SetResetClause\nDropdbStmt ::= DROP DATABASE database_name | DROP DATABASE IF_P EXISTS \ndatabase_name | DROP DATABASE database_name opt_with '(' \ndrop_option_list ')' | DROP DATABASE IF_P EXISTS database_name opt_with \n'(' drop_option_list ')'\ndrop_option_list ::= drop_option | drop_option_list ',' drop_option\ndrop_option ::= FORCE\nAlterCollationStmt ::= ALTER COLLATION any_name REFRESH VERSION_P\nAlterSystemStmt ::= ALTER SYSTEM_P SET generic_set | ALTER SYSTEM_P \nRESET generic_reset\nCreateDomainStmt ::= CREATE DOMAIN_P any_name opt_as Typename ColQualList\nAlterDomainStmt ::= ALTER DOMAIN_P any_name alter_column_default | ALTER \nDOMAIN_P any_name DROP NOT NULL_P | ALTER DOMAIN_P any_name SET NOT \nNULL_P | ALTER DOMAIN_P any_name ADD_P TableConstraint | ALTER DOMAIN_P \nany_name DROP CONSTRAINT name opt_drop_behavior | ALTER DOMAIN_P \nany_name DROP CONSTRAINT IF_P EXISTS name opt_drop_behavior | ALTER \nDOMAIN_P any_name VALIDATE CONSTRAINT name\nopt_as ::= AS |\nAlterTSDictionaryStmt ::= ALTER TEXT_P SEARCH DICTIONARY 
any_name definition\nAlterTSConfigurationStmt ::= ALTER TEXT_P SEARCH CONFIGURATION any_name \nADD_P MAPPING FOR name_list any_with any_name_list | ALTER TEXT_P SEARCH \nCONFIGURATION any_name ALTER MAPPING FOR name_list any_with \nany_name_list | ALTER TEXT_P SEARCH CONFIGURATION any_name ALTER MAPPING \nREPLACE any_name any_with any_name | ALTER TEXT_P SEARCH CONFIGURATION \nany_name ALTER MAPPING FOR name_list REPLACE any_name any_with any_name \n| ALTER TEXT_P SEARCH CONFIGURATION any_name DROP MAPPING FOR name_list \n| ALTER TEXT_P SEARCH CONFIGURATION any_name DROP MAPPING IF_P EXISTS \nFOR name_list\nany_with ::= WITH | WITH_LA\nCreateConversionStmt ::= CREATE opt_default CONVERSION_P any_name FOR \nSconst TO Sconst FROM any_name\nClusterStmt ::= CLUSTER opt_verbose qualified_name \ncluster_index_specification | CLUSTER opt_verbose | CLUSTER opt_verbose \nindex_name ON qualified_name\ncluster_index_specification ::= USING index_name |\nVacuumStmt ::= VACUUM opt_full opt_freeze opt_verbose opt_analyze \nopt_vacuum_relation_list | VACUUM '(' vac_analyze_option_list ')' \nopt_vacuum_relation_list\nAnalyzeStmt ::= analyze_keyword opt_verbose opt_vacuum_relation_list | \nanalyze_keyword '(' vac_analyze_option_list ')' opt_vacuum_relation_list\nvac_analyze_option_list ::= vac_analyze_option_elem | \nvac_analyze_option_list ',' vac_analyze_option_elem\nanalyze_keyword ::= ANALYZE | ANALYSE\nvac_analyze_option_elem ::= vac_analyze_option_name vac_analyze_option_arg\nvac_analyze_option_name ::= NonReservedWord | analyze_keyword\nvac_analyze_option_arg ::= opt_boolean_or_string | NumericOnly |\nopt_analyze ::= analyze_keyword |\nopt_verbose ::= VERBOSE |\nopt_full ::= FULL |\nopt_freeze ::= FREEZE |\nopt_name_list ::= '(' name_list ')' |\nvacuum_relation ::= qualified_name opt_name_list\nvacuum_relation_list ::= vacuum_relation | vacuum_relation_list ',' \nvacuum_relation\nopt_vacuum_relation_list ::= vacuum_relation_list |\nExplainStmt ::= EXPLAIN ExplainableStmt | 
EXPLAIN analyze_keyword \nopt_verbose ExplainableStmt | EXPLAIN VERBOSE ExplainableStmt | EXPLAIN \n'(' explain_option_list ')' ExplainableStmt\nExplainableStmt ::= SelectStmt | InsertStmt | UpdateStmt | DeleteStmt | \nDeclareCursorStmt | CreateAsStmt | CreateMatViewStmt | \nRefreshMatViewStmt | ExecuteStmt\nexplain_option_list ::= explain_option_elem | explain_option_list ',' \nexplain_option_elem\nexplain_option_elem ::= explain_option_name explain_option_arg\nexplain_option_name ::= NonReservedWord | analyze_keyword\nexplain_option_arg ::= opt_boolean_or_string | NumericOnly |\nPrepareStmt ::= PREPARE name prep_type_clause AS PreparableStmt\nprep_type_clause ::= '(' type_list ')' |\nPreparableStmt ::= SelectStmt | InsertStmt | UpdateStmt | DeleteStmt\nExecuteStmt ::= EXECUTE name execute_param_clause | CREATE OptTemp TABLE \ncreate_as_target AS EXECUTE name execute_param_clause opt_with_data | \nCREATE OptTemp TABLE IF_P NOT EXISTS create_as_target AS EXECUTE name \nexecute_param_clause opt_with_data\nexecute_param_clause ::= '(' expr_list ')' |\nDeallocateStmt ::= DEALLOCATE name | DEALLOCATE PREPARE name | \nDEALLOCATE ALL | DEALLOCATE PREPARE ALL\nInsertStmt ::= opt_with_clause INSERT INTO insert_target insert_rest \nopt_on_conflict returning_clause\ninsert_target ::= qualified_name | qualified_name AS ColId\ninsert_rest ::= SelectStmt | OVERRIDING override_kind VALUE_P SelectStmt \n| '(' insert_column_list ')' SelectStmt | '(' insert_column_list ')' \nOVERRIDING override_kind VALUE_P SelectStmt | DEFAULT VALUES\noverride_kind ::= USER | SYSTEM_P\ninsert_column_list ::= insert_column_item | insert_column_list ',' \ninsert_column_item\ninsert_column_item ::= ColId opt_indirection\nopt_on_conflict ::= ON CONFLICT opt_conf_expr DO UPDATE SET \nset_clause_list where_clause | ON CONFLICT opt_conf_expr DO NOTHING |\nopt_conf_expr ::= '(' index_params ')' where_clause | ON CONSTRAINT name |\nreturning_clause ::= RETURNING target_list |\nDeleteStmt ::= 
opt_with_clause DELETE_P FROM relation_expr_opt_alias \nusing_clause where_or_current_clause returning_clause\nusing_clause ::= USING from_list |\nLockStmt ::= LOCK_P opt_table relation_expr_list opt_lock opt_nowait\nopt_lock ::= IN_P lock_type MODE |\nlock_type ::= ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE UPDATE \nEXCLUSIVE | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE | ACCESS EXCLUSIVE\nopt_nowait ::= NOWAIT |\nopt_nowait_or_skip ::= NOWAIT | SKIP LOCKED |\nUpdateStmt ::= opt_with_clause UPDATE relation_expr_opt_alias SET \nset_clause_list from_clause where_or_current_clause returning_clause\nset_clause_list ::= set_clause | set_clause_list ',' set_clause\nset_clause ::= set_target '=' a_expr | '(' set_target_list ')' '=' a_expr\nset_target ::= ColId opt_indirection\nset_target_list ::= set_target | set_target_list ',' set_target\nDeclareCursorStmt ::= DECLARE cursor_name cursor_options CURSOR opt_hold \nFOR SelectStmt\ncursor_name ::= name\ncursor_options ::= | cursor_options NO SCROLL | cursor_options SCROLL | \ncursor_options BINARY | cursor_options INSENSITIVE\nopt_hold ::= | WITH HOLD | WITHOUT HOLD\nSelectStmt ::= select_no_parens | select_with_parens\nselect_with_parens ::= '(' select_no_parens ')' | '(' select_with_parens ')'\nselect_no_parens ::= simple_select | select_clause sort_clause | \nselect_clause opt_sort_clause for_locking_clause opt_select_limit | \nselect_clause opt_sort_clause select_limit opt_for_locking_clause | \nwith_clause select_clause | with_clause select_clause sort_clause | \nwith_clause select_clause opt_sort_clause for_locking_clause \nopt_select_limit | with_clause select_clause opt_sort_clause \nselect_limit opt_for_locking_clause\nselect_clause ::= simple_select | select_with_parens\nsimple_select ::= SELECT opt_all_clause opt_target_list into_clause \nfrom_clause where_clause group_clause having_clause window_clause | \nSELECT distinct_clause target_list into_clause from_clause where_clause \ngroup_clause having_clause 
window_clause | values_clause | TABLE \nrelation_expr | select_clause UNION all_or_distinct select_clause | \nselect_clause INTERSECT all_or_distinct select_clause | select_clause \nEXCEPT all_or_distinct select_clause\nwith_clause ::= WITH cte_list | WITH_LA cte_list | WITH RECURSIVE cte_list\ncte_list ::= common_table_expr | cte_list ',' common_table_expr\ncommon_table_expr ::= name opt_name_list AS opt_materialized '(' \nPreparableStmt ')'\nopt_materialized ::= MATERIALIZED | NOT MATERIALIZED |\nopt_with_clause ::= with_clause |\ninto_clause ::= INTO OptTempTableName |\nOptTempTableName ::= TEMPORARY opt_table qualified_name | TEMP opt_table \nqualified_name | LOCAL TEMPORARY opt_table qualified_name | LOCAL TEMP \nopt_table qualified_name | GLOBAL TEMPORARY opt_table qualified_name | \nGLOBAL TEMP opt_table qualified_name | UNLOGGED opt_table qualified_name \n| TABLE qualified_name | qualified_name\nopt_table ::= TABLE |\nall_or_distinct ::= ALL | DISTINCT |\ndistinct_clause ::= DISTINCT | DISTINCT ON '(' expr_list ')'\nopt_all_clause ::= ALL |\nopt_sort_clause ::= sort_clause |\nsort_clause ::= ORDER BY sortby_list\nsortby_list ::= sortby | sortby_list ',' sortby\nsortby ::= a_expr USING qual_all_Op opt_nulls_order | a_expr \nopt_asc_desc opt_nulls_order\nselect_limit ::= limit_clause offset_clause | offset_clause limit_clause \n| limit_clause | offset_clause\nopt_select_limit ::= select_limit |\nlimit_clause ::= LIMIT select_limit_value | LIMIT select_limit_value ',' \nselect_offset_value | FETCH first_or_next select_fetch_first_value \nrow_or_rows ONLY | FETCH first_or_next select_fetch_first_value \nrow_or_rows WITH TIES | FETCH first_or_next row_or_rows ONLY | FETCH \nfirst_or_next row_or_rows WITH TIES\noffset_clause ::= OFFSET select_offset_value | OFFSET \nselect_fetch_first_value row_or_rows\nselect_limit_value ::= a_expr | ALL\nselect_offset_value ::= a_expr\nselect_fetch_first_value ::= c_expr | '+' I_or_F_const | '-' I_or_F_const\nI_or_F_const ::= 
Iconst | FCONST\nrow_or_rows ::= ROW | ROWS\nfirst_or_next ::= FIRST_P | NEXT\ngroup_clause ::= GROUP_P BY group_by_list |\ngroup_by_list ::= group_by_item | group_by_list ',' group_by_item\ngroup_by_item ::= a_expr | empty_grouping_set | cube_clause | \nrollup_clause | grouping_sets_clause\nempty_grouping_set ::= '(' ')'\nrollup_clause ::= ROLLUP '(' expr_list ')'\ncube_clause ::= CUBE '(' expr_list ')'\ngrouping_sets_clause ::= GROUPING SETS '(' group_by_list ')'\nhaving_clause ::= HAVING a_expr |\nfor_locking_clause ::= for_locking_items | FOR READ ONLY\nopt_for_locking_clause ::= for_locking_clause |\nfor_locking_items ::= for_locking_item | for_locking_items for_locking_item\nfor_locking_item ::= for_locking_strength locked_rels_list \nopt_nowait_or_skip\nfor_locking_strength ::= FOR UPDATE | FOR NO KEY UPDATE | FOR SHARE | \nFOR KEY SHARE\nlocked_rels_list ::= OF qualified_name_list |\nvalues_clause ::= VALUES '(' expr_list ')' | values_clause ',' '(' \nexpr_list ')'\nfrom_clause ::= FROM from_list |\nfrom_list ::= table_ref | from_list ',' table_ref\ntable_ref ::= relation_expr opt_alias_clause | relation_expr \nopt_alias_clause tablesample_clause | func_table func_alias_clause | \nLATERAL_P func_table func_alias_clause | xmltable opt_alias_clause | \nLATERAL_P xmltable opt_alias_clause | select_with_parens \nopt_alias_clause | LATERAL_P select_with_parens opt_alias_clause | \njoined_table | '(' joined_table ')' alias_clause\njoined_table ::= '(' joined_table ')' | table_ref CROSS JOIN table_ref | \ntable_ref join_type JOIN table_ref join_qual | table_ref JOIN table_ref \njoin_qual | table_ref NATURAL join_type JOIN table_ref | table_ref \nNATURAL JOIN table_ref\nalias_clause ::= AS ColId '(' name_list ')' | AS ColId | ColId '(' \nname_list ')' | ColId\nopt_alias_clause ::= alias_clause |\nfunc_alias_clause ::= alias_clause | AS '(' TableFuncElementList ')' | \nAS ColId '(' TableFuncElementList ')' | ColId '(' TableFuncElementList ')' |\njoin_type ::= FULL 
join_outer | LEFT join_outer | RIGHT join_outer | INNER_P\njoin_outer ::= OUTER_P |\njoin_qual ::= USING '(' name_list ')' | ON a_expr\nrelation_expr ::= qualified_name | qualified_name '*' | ONLY \nqualified_name | ONLY '(' qualified_name ')'\nrelation_expr_list ::= relation_expr | relation_expr_list ',' relation_expr\nrelation_expr_opt_alias ::= relation_expr | relation_expr ColId | \nrelation_expr AS ColId\ntablesample_clause ::= TABLESAMPLE func_name '(' expr_list ')' \nopt_repeatable_clause\nopt_repeatable_clause ::= REPEATABLE '(' a_expr ')' |\nfunc_table ::= func_expr_windowless opt_ordinality | ROWS FROM '(' \nrowsfrom_list ')' opt_ordinality\nrowsfrom_item ::= func_expr_windowless opt_col_def_list\nrowsfrom_list ::= rowsfrom_item | rowsfrom_list ',' rowsfrom_item\nopt_col_def_list ::= AS '(' TableFuncElementList ')' |\nopt_ordinality ::= WITH_LA ORDINALITY |\nwhere_clause ::= WHERE a_expr |\nwhere_or_current_clause ::= WHERE a_expr | WHERE CURRENT_P OF cursor_name |\nOptTableFuncElementList ::= TableFuncElementList |\nTableFuncElementList ::= TableFuncElement | TableFuncElementList ',' \nTableFuncElement\nTableFuncElement ::= ColId Typename opt_collate_clause\nxmltable ::= XMLTABLE '(' c_expr xmlexists_argument COLUMNS \nxmltable_column_list ')' | XMLTABLE '(' XMLNAMESPACES '(' \nxml_namespace_list ')' ',' c_expr xmlexists_argument COLUMNS \nxmltable_column_list ')'\nxmltable_column_list ::= xmltable_column_el | xmltable_column_list ',' \nxmltable_column_el\nxmltable_column_el ::= ColId Typename | ColId Typename \nxmltable_column_option_list | ColId FOR ORDINALITY\nxmltable_column_option_list ::= xmltable_column_option_el | \nxmltable_column_option_list xmltable_column_option_el\nxmltable_column_option_el ::= IDENT b_expr | DEFAULT b_expr | NOT NULL_P \n| NULL_P\nxml_namespace_list ::= xml_namespace_el | xml_namespace_list ',' \nxml_namespace_el\nxml_namespace_el ::= b_expr AS ColLabel | DEFAULT b_expr\nTypename ::= SimpleTypename opt_array_bounds | SETOF 
SimpleTypename \nopt_array_bounds | SimpleTypename ARRAY '[' Iconst ']' | SETOF \nSimpleTypename ARRAY '[' Iconst ']' | SimpleTypename ARRAY | SETOF \nSimpleTypename ARRAY\nopt_array_bounds ::= opt_array_bounds '[' ']' | opt_array_bounds '[' \nIconst ']' |\nSimpleTypename ::= GenericType | Numeric | Bit | Character | \nConstDatetime | ConstInterval opt_interval | ConstInterval '(' Iconst ')'\nConstTypename ::= Numeric | ConstBit | ConstCharacter | ConstDatetime\nGenericType ::= type_function_name opt_type_modifiers | \ntype_function_name attrs opt_type_modifiers\nopt_type_modifiers ::= '(' expr_list ')' |\nNumeric ::= INT_P | INTEGER | SMALLINT | BIGINT | REAL | FLOAT_P \nopt_float | DOUBLE_P PRECISION | DECIMAL_P opt_type_modifiers | DEC \nopt_type_modifiers | NUMERIC opt_type_modifiers | BOOLEAN_P\nopt_float ::= '(' Iconst ')' |\nBit ::= BitWithLength | BitWithoutLength\nConstBit ::= BitWithLength | BitWithoutLength\nBitWithLength ::= BIT opt_varying '(' expr_list ')'\nBitWithoutLength ::= BIT opt_varying\nCharacter ::= CharacterWithLength | CharacterWithoutLength\nConstCharacter ::= CharacterWithLength | CharacterWithoutLength\nCharacterWithLength ::= character '(' Iconst ')'\nCharacterWithoutLength ::= character\ncharacter ::= CHARACTER opt_varying | CHAR_P opt_varying | VARCHAR | \nNATIONAL CHARACTER opt_varying | NATIONAL CHAR_P opt_varying | NCHAR \nopt_varying\nopt_varying ::= VARYING |\nConstDatetime ::= TIMESTAMP '(' Iconst ')' opt_timezone | TIMESTAMP \nopt_timezone | TIME '(' Iconst ')' opt_timezone | TIME opt_timezone\nConstInterval ::= INTERVAL\nopt_timezone ::= WITH_LA TIME ZONE | WITHOUT TIME ZONE |\nopt_interval ::= YEAR_P | MONTH_P | DAY_P | HOUR_P | MINUTE_P | \ninterval_second | YEAR_P TO MONTH_P | DAY_P TO HOUR_P | DAY_P TO \nMINUTE_P | DAY_P TO interval_second | HOUR_P TO MINUTE_P | HOUR_P TO \ninterval_second | MINUTE_P TO interval_second |\ninterval_second ::= SECOND_P | SECOND_P '(' Iconst ')'\na_expr ::= c_expr | a_expr TYPECAST Typename | 
a_expr COLLATE any_name | \na_expr AT TIME ZONE a_expr | '+' a_expr | '-' a_expr | a_expr '+' a_expr \n| a_expr '-' a_expr | a_expr '*' a_expr | a_expr '/' a_expr | a_expr '%' \na_expr | a_expr '^' a_expr | a_expr '<' a_expr | a_expr '>' a_expr | \na_expr '=' a_expr | a_expr LESS_EQUALS a_expr | a_expr GREATER_EQUALS \na_expr | a_expr NOT_EQUALS a_expr | a_expr qual_Op a_expr | qual_Op \na_expr | a_expr qual_Op | a_expr AND a_expr | a_expr OR a_expr | NOT \na_expr | NOT_LA a_expr | a_expr LIKE a_expr | a_expr LIKE a_expr ESCAPE \na_expr | a_expr NOT_LA LIKE a_expr | a_expr NOT_LA LIKE a_expr ESCAPE \na_expr | a_expr ILIKE a_expr | a_expr ILIKE a_expr ESCAPE a_expr | \na_expr NOT_LA ILIKE a_expr | a_expr NOT_LA ILIKE a_expr ESCAPE a_expr | \na_expr SIMILAR TO a_expr | a_expr SIMILAR TO a_expr ESCAPE a_expr | \na_expr NOT_LA SIMILAR TO a_expr | a_expr NOT_LA SIMILAR TO a_expr ESCAPE \na_expr | a_expr IS NULL_P | a_expr ISNULL | a_expr IS NOT NULL_P | \na_expr NOTNULL | row OVERLAPS row | a_expr IS TRUE_P | a_expr IS NOT \nTRUE_P | a_expr IS FALSE_P | a_expr IS NOT FALSE_P | a_expr IS UNKNOWN | \na_expr IS NOT UNKNOWN | a_expr IS DISTINCT FROM a_expr | a_expr IS NOT \nDISTINCT FROM a_expr | a_expr IS OF '(' type_list ')' | a_expr IS NOT OF \n'(' type_list ')' | a_expr BETWEEN opt_asymmetric b_expr AND a_expr | \na_expr NOT_LA BETWEEN opt_asymmetric b_expr AND a_expr | a_expr BETWEEN \nSYMMETRIC b_expr AND a_expr | a_expr NOT_LA BETWEEN SYMMETRIC b_expr AND \na_expr | a_expr IN_P in_expr | a_expr NOT_LA IN_P in_expr | a_expr \nsubquery_Op sub_type select_with_parens | a_expr subquery_Op sub_type \n'(' a_expr ')' | UNIQUE select_with_parens | a_expr IS DOCUMENT_P | \na_expr IS NOT DOCUMENT_P | a_expr IS NORMALIZED | a_expr IS \nunicode_normal_form NORMALIZED | a_expr IS NOT NORMALIZED | a_expr IS \nNOT unicode_normal_form NORMALIZED | DEFAULT\nb_expr ::= c_expr | b_expr TYPECAST Typename | '+' b_expr | '-' b_expr | \nb_expr '+' b_expr | b_expr '-' b_expr | b_expr '*' 
b_expr | b_expr '/' \nb_expr | b_expr '%' b_expr | b_expr '^' b_expr | b_expr '<' b_expr | \nb_expr '>' b_expr | b_expr '=' b_expr | b_expr LESS_EQUALS b_expr | \nb_expr GREATER_EQUALS b_expr | b_expr NOT_EQUALS b_expr | b_expr qual_Op \nb_expr | qual_Op b_expr | b_expr qual_Op | b_expr IS DISTINCT FROM \nb_expr | b_expr IS NOT DISTINCT FROM b_expr | b_expr IS OF '(' type_list \n')' | b_expr IS NOT OF '(' type_list ')' | b_expr IS DOCUMENT_P | b_expr \nIS NOT DOCUMENT_P\nc_expr ::= columnref | AexprConst | PARAM opt_indirection | '(' a_expr \n')' opt_indirection | case_expr | func_expr | select_with_parens | \nselect_with_parens indirection | EXISTS select_with_parens | ARRAY \nselect_with_parens | ARRAY array_expr | explicit_row | implicit_row | \nGROUPING '(' expr_list ')'\nfunc_application ::= func_name '(' ')' | func_name '(' func_arg_list \nopt_sort_clause ')' | func_name '(' VARIADIC func_arg_expr \nopt_sort_clause ')' | func_name '(' func_arg_list ',' VARIADIC \nfunc_arg_expr opt_sort_clause ')' | func_name '(' ALL func_arg_list \nopt_sort_clause ')' | func_name '(' DISTINCT func_arg_list \nopt_sort_clause ')' | func_name '(' '*' ')'\nfunc_expr ::= func_application within_group_clause filter_clause \nover_clause | func_expr_common_subexpr\nfunc_expr_windowless ::= func_application | func_expr_common_subexpr\nfunc_expr_common_subexpr ::= COLLATION FOR '(' a_expr ')' | CURRENT_DATE \n| CURRENT_TIME | CURRENT_TIME '(' Iconst ')' | CURRENT_TIMESTAMP | \nCURRENT_TIMESTAMP '(' Iconst ')' | LOCALTIME | LOCALTIME '(' Iconst ')' \n| LOCALTIMESTAMP | LOCALTIMESTAMP '(' Iconst ')' | CURRENT_ROLE | \nCURRENT_USER | SESSION_USER | USER | CURRENT_CATALOG | CURRENT_SCHEMA | \nCAST '(' a_expr AS Typename ')' | EXTRACT '(' extract_list ')' | \nNORMALIZE '(' a_expr ')' | NORMALIZE '(' a_expr ',' unicode_normal_form \n')' | OVERLAY '(' overlay_list ')' | POSITION '(' position_list ')' | \nSUBSTRING '(' substr_list ')' | TREAT '(' a_expr AS Typename ')' | TRIM \n'(' BOTH 
trim_list ')' | TRIM '(' LEADING trim_list ')' | TRIM '(' \nTRAILING trim_list ')' | TRIM '(' trim_list ')' | NULLIF '(' a_expr ',' \na_expr ')' | COALESCE '(' expr_list ')' | GREATEST '(' expr_list ')' | \nLEAST '(' expr_list ')' | XMLCONCAT '(' expr_list ')' | XMLELEMENT '(' \nNAME_P ColLabel ')' | XMLELEMENT '(' NAME_P ColLabel ',' xml_attributes \n')' | XMLELEMENT '(' NAME_P ColLabel ',' expr_list ')' | XMLELEMENT '(' \nNAME_P ColLabel ',' xml_attributes ',' expr_list ')' | XMLEXISTS '(' \nc_expr xmlexists_argument ')' | XMLFOREST '(' xml_attribute_list ')' | \nXMLPARSE '(' document_or_content a_expr xml_whitespace_option ')' | \nXMLPI '(' NAME_P ColLabel ')' | XMLPI '(' NAME_P ColLabel ',' a_expr ')' \n| XMLROOT '(' a_expr ',' xml_root_version opt_xml_root_standalone ')' | \nXMLSERIALIZE '(' document_or_content a_expr AS SimpleTypename ')'\nxml_root_version ::= VERSION_P a_expr | VERSION_P NO VALUE_P\nopt_xml_root_standalone ::= ',' STANDALONE_P YES_P | ',' STANDALONE_P NO \n| ',' STANDALONE_P NO VALUE_P |\nxml_attributes ::= XMLATTRIBUTES '(' xml_attribute_list ')'\nxml_attribute_list ::= xml_attribute_el | xml_attribute_list ',' \nxml_attribute_el\nxml_attribute_el ::= a_expr AS ColLabel | a_expr\ndocument_or_content ::= DOCUMENT_P | CONTENT_P\nxml_whitespace_option ::= PRESERVE WHITESPACE_P | STRIP_P WHITESPACE_P |\nxmlexists_argument ::= PASSING c_expr | PASSING c_expr xml_passing_mech \n| PASSING xml_passing_mech c_expr | PASSING xml_passing_mech c_expr \nxml_passing_mech\nxml_passing_mech ::= BY REF | BY VALUE_P\nwithin_group_clause ::= WITHIN GROUP_P '(' sort_clause ')' |\nfilter_clause ::= FILTER '(' WHERE a_expr ')' |\nwindow_clause ::= WINDOW window_definition_list |\nwindow_definition_list ::= window_definition | window_definition_list \n',' window_definition\nwindow_definition ::= ColId AS window_specification\nover_clause ::= OVER window_specification | OVER ColId |\nwindow_specification ::= '(' opt_existing_window_name \nopt_partition_clause 
opt_sort_clause opt_frame_clause ')'\nopt_existing_window_name ::= ColId |\nopt_partition_clause ::= PARTITION BY expr_list |\nopt_frame_clause ::= RANGE frame_extent opt_window_exclusion_clause | \nROWS frame_extent opt_window_exclusion_clause | GROUPS frame_extent \nopt_window_exclusion_clause |\nframe_extent ::= frame_bound | BETWEEN frame_bound AND frame_bound\nframe_bound ::= UNBOUNDED PRECEDING | UNBOUNDED FOLLOWING | CURRENT_P \nROW | a_expr PRECEDING | a_expr FOLLOWING\nopt_window_exclusion_clause ::= EXCLUDE CURRENT_P ROW | EXCLUDE GROUP_P \n| EXCLUDE TIES | EXCLUDE NO OTHERS |\nrow ::= ROW '(' expr_list ')' | ROW '(' ')' | '(' expr_list ',' a_expr ')'\nexplicit_row ::= ROW '(' expr_list ')' | ROW '(' ')'\nimplicit_row ::= '(' expr_list ',' a_expr ')'\nsub_type ::= ANY | SOME | ALL\nall_Op ::= Op | MathOp\nMathOp ::= '+' | '-' | '*' | '/' | '%' | '^' | '<' | '>' | '=' | \nLESS_EQUALS | GREATER_EQUALS | NOT_EQUALS\nqual_Op ::= Op | OPERATOR '(' any_operator ')'\nqual_all_Op ::= all_Op | OPERATOR '(' any_operator ')'\nsubquery_Op ::= all_Op | OPERATOR '(' any_operator ')' | LIKE | NOT_LA \nLIKE | ILIKE | NOT_LA ILIKE\nexpr_list ::= a_expr | expr_list ',' a_expr\nfunc_arg_list ::= func_arg_expr | func_arg_list ',' func_arg_expr\nfunc_arg_expr ::= a_expr | param_name COLON_EQUALS a_expr | param_name \nEQUALS_GREATER a_expr\ntype_list ::= Typename | type_list ',' Typename\narray_expr ::= '[' expr_list ']' | '[' array_expr_list ']' | '[' ']'\narray_expr_list ::= array_expr | array_expr_list ',' array_expr\nextract_list ::= extract_arg FROM a_expr |\nextract_arg ::= IDENT | YEAR_P | MONTH_P | DAY_P | HOUR_P | MINUTE_P | \nSECOND_P | Sconst\nunicode_normal_form ::= NFC | NFD | NFKC | NFKD\noverlay_list ::= a_expr overlay_placing substr_from substr_for | a_expr \noverlay_placing substr_from\noverlay_placing ::= PLACING a_expr\nposition_list ::= b_expr IN_P b_expr |\nsubstr_list ::= a_expr substr_from substr_for | a_expr substr_for \nsubstr_from | a_expr substr_from 
| a_expr substr_for | expr_list |\nsubstr_from ::= FROM a_expr\nsubstr_for ::= FOR a_expr\ntrim_list ::= a_expr FROM expr_list | FROM expr_list | expr_list\nin_expr ::= select_with_parens | '(' expr_list ')'\ncase_expr ::= CASE case_arg when_clause_list case_default END_P\nwhen_clause_list ::= when_clause | when_clause_list when_clause\nwhen_clause ::= WHEN a_expr THEN a_expr\ncase_default ::= ELSE a_expr |\ncase_arg ::= a_expr |\ncolumnref ::= ColId | ColId indirection\nindirection_el ::= '.' attr_name | '.' '*' | '[' a_expr ']' | '[' \nopt_slice_bound ':' opt_slice_bound ']'\nopt_slice_bound ::= a_expr |\nindirection ::= indirection_el | indirection indirection_el\nopt_indirection ::= | opt_indirection indirection_el\nopt_asymmetric ::= ASYMMETRIC | /*empty*/\nopt_target_list ::= target_list |\ntarget_list ::= target_el | target_list ',' target_el\ntarget_el ::= a_expr AS ColLabel | a_expr IDENT | a_expr | '*'\nqualified_name_list ::= qualified_name | qualified_name_list ',' \nqualified_name\nqualified_name ::= ColId | ColId indirection\nname_list ::= name | name_list ',' name\nname ::= ColId\ndatabase_name ::= ColId\naccess_method ::= ColId\nattr_name ::= ColLabel\nindex_name ::= ColId\nfile_name ::= Sconst\nfunc_name ::= type_function_name | ColId indirection\nAexprConst ::= Iconst | FCONST | Sconst | BCONST | XCONST | func_name \nSconst | func_name '(' func_arg_list opt_sort_clause ')' Sconst | \nConstTypename Sconst | ConstInterval Sconst opt_interval | ConstInterval \n'(' Iconst ')' Sconst | TRUE_P | FALSE_P | NULL_P\nIconst ::= ICONST\nSconst ::= SCONST\nSignedIconst ::= Iconst | '+' Iconst | '-' Iconst\nRoleId ::= RoleSpec\nRoleSpec ::= NonReservedWord | CURRENT_USER | SESSION_USER\nrole_list ::= RoleSpec | role_list ',' RoleSpec\nColId ::= IDENT | unreserved_keyword | col_name_keyword\ntype_function_name ::= IDENT | unreserved_keyword | type_func_name_keyword\nNonReservedWord ::= IDENT | unreserved_keyword | col_name_keyword | 
\ntype_func_name_keyword\nColLabel ::= IDENT | unreserved_keyword | col_name_keyword | \ntype_func_name_keyword | reserved_keyword\nunreserved_keyword ::= ABORT_P | ABSOLUTE_P | ACCESS | ACTION | ADD_P | \nADMIN | AFTER | AGGREGATE | ALSO | ALTER | ALWAYS | ASSERTION | \nASSIGNMENT | AT | ATTACH | ATTRIBUTE | BACKWARD | BEFORE | BEGIN_P | BY \n| CACHE | CALL | CALLED | CASCADE | CASCADED | CATALOG_P | CHAIN | \nCHARACTERISTICS | CHECKPOINT | CLASS | CLOSE | CLUSTER | COLUMNS | \nCOMMENT | COMMENTS | COMMIT | COMMITTED | CONFIGURATION | CONFLICT | \nCONNECTION | CONSTRAINTS | CONTENT_P | CONTINUE_P | CONVERSION_P | COPY \n| COST | CSV | CUBE | CURRENT_P | CURSOR | CYCLE | DATA_P | DATABASE | \nDAY_P | DEALLOCATE | DECLARE | DEFAULTS | DEFERRED | DEFINER | DELETE_P \n| DELIMITER | DELIMITERS | DEPENDS | DETACH | DICTIONARY | DISABLE_P | \nDISCARD | DOCUMENT_P | DOMAIN_P | DOUBLE_P | DROP | EACH | ENABLE_P | \nENCODING | ENCRYPTED | ENUM_P | ESCAPE | EVENT | EXCLUDE | EXCLUDING | \nEXCLUSIVE | EXECUTE | EXPLAIN | EXPRESSION | EXTENSION | EXTERNAL | \nFAMILY | FILTER | FIRST_P | FOLLOWING | FORCE | FORWARD | FUNCTION | \nFUNCTIONS | GENERATED | GLOBAL | GRANTED | GROUPS | HANDLER | HEADER_P | \nHOLD | HOUR_P | IDENTITY_P | IF_P | IMMEDIATE | IMMUTABLE | IMPLICIT_P | \nIMPORT_P | INCLUDE | INCLUDING | INCREMENT | INDEX | INDEXES | INHERIT | \nINHERITS | INLINE_P | INPUT_P | INSENSITIVE | INSERT | INSTEAD | INVOKER \n| ISOLATION | KEY | LABEL | LANGUAGE | LARGE_P | LAST_P | LEAKPROOF | \nLEVEL | LISTEN | LOAD | LOCAL | LOCATION | LOCK_P | LOCKED | LOGGED | \nMAPPING | MATCH | MATERIALIZED | MAXVALUE | METHOD | MINUTE_P | MINVALUE \n| MODE | MONTH_P | MOVE | NAME_P | NAMES | NEW | NEXT | NFC | NFD | NFKC \n| NFKD | NO | NORMALIZED | NOTHING | NOTIFY | NOWAIT | NULLS_P | \nOBJECT_P | OF | OFF | OIDS | OLD | OPERATOR | OPTION | OPTIONS | \nORDINALITY | OTHERS | OVER | OVERRIDING | OWNED | OWNER | PARALLEL | \nPARSER | PARTIAL | PARTITION | PASSING | PASSWORD | PLANS | 
POLICY | \nPRECEDING | PREPARE | PREPARED | PRESERVE | PRIOR | PRIVILEGES | \nPROCEDURAL | PROCEDURE | PROCEDURES | PROGRAM | PUBLICATION | QUOTE | \nRANGE | READ | REASSIGN | RECHECK | RECURSIVE | REF | REFERENCING | \nREFRESH | REINDEX | RELATIVE_P | RELEASE | RENAME | REPEATABLE | REPLACE \n| REPLICA | RESET | RESTART | RESTRICT | RETURNS | REVOKE | ROLE | \nROLLBACK | ROLLUP | ROUTINE | ROUTINES | ROWS | RULE | SAVEPOINT | \nSCHEMA | SCHEMAS | SCROLL | SEARCH | SECOND_P | SECURITY | SEQUENCE | \nSEQUENCES | SERIALIZABLE | SERVER | SESSION | SET | SETS | SHARE | SHOW \n| SIMPLE | SKIP | SNAPSHOT | SQL_P | STABLE | STANDALONE_P | START | \nSTATEMENT | STATISTICS | STDIN | STDOUT | STORAGE | STORED | STRICT_P | \nSTRIP_P | SUBSCRIPTION | SUPPORT | SYSID | SYSTEM_P | TABLES | \nTABLESPACE | TEMP | TEMPLATE | TEMPORARY | TEXT_P | TIES | TRANSACTION | \nTRANSFORM | TRIGGER | TRUNCATE | TRUSTED | TYPE_P | TYPES_P | UESCAPE | \nUNBOUNDED | UNCOMMITTED | UNENCRYPTED | UNKNOWN | UNLISTEN | UNLOGGED | \nUNTIL | UPDATE | VACUUM | VALID | VALIDATE | VALIDATOR | VALUE_P | \nVARYING | VERSION_P | VIEW | VIEWS | VOLATILE | WHITESPACE_P | WITHIN | \nWITHOUT | WORK | WRAPPER | WRITE | XML_P | YEAR_P | YES_P | ZONE\ncol_name_keyword ::= BETWEEN | BIGINT | BIT | BOOLEAN_P | CHAR_P | \nCHARACTER | COALESCE | DEC | DECIMAL_P | EXISTS | EXTRACT | FLOAT_P | \nGREATEST | GROUPING | INOUT | INT_P | INTEGER | INTERVAL | LEAST | \nNATIONAL | NCHAR | NONE | NORMALIZE | NULLIF | NUMERIC | OUT_P | OVERLAY \n| POSITION | PRECISION | REAL | ROW | SETOF | SMALLINT | SUBSTRING | \nTIME | TIMESTAMP | TREAT | TRIM | VALUES | VARCHAR | XMLATTRIBUTES | \nXMLCONCAT | XMLELEMENT | XMLEXISTS | XMLFOREST | XMLNAMESPACES | \nXMLPARSE | XMLPI | XMLROOT | XMLSERIALIZE | XMLTABLE\ntype_func_name_keyword ::= AUTHORIZATION | BINARY | COLLATION | \nCONCURRENTLY | CROSS | CURRENT_SCHEMA | FREEZE | FULL | ILIKE | INNER_P \n| IS | ISNULL | JOIN | LEFT | LIKE | NATURAL | NOTNULL | OUTER_P | \nOVERLAPS | RIGHT | 
SIMILAR | TABLESAMPLE | VERBOSE\nreserved_keyword ::= ALL | ANALYSE | ANALYZE | AND | ANY | ARRAY | AS | \nASC | ASYMMETRIC | BOTH | CASE | CAST | CHECK | COLLATE | COLUMN | \nCONSTRAINT | CREATE | CURRENT_CATALOG | CURRENT_DATE | CURRENT_ROLE | \nCURRENT_TIME | CURRENT_TIMESTAMP | CURRENT_USER | DEFAULT | DEFERRABLE | \nDESC | DISTINCT | DO | ELSE | END_P | EXCEPT | FALSE_P | FETCH | FOR | \nFOREIGN | FROM | GRANT | GROUP_P | HAVING | IN_P | INITIALLY | INTERSECT \n| INTO | LATERAL_P | LEADING | LIMIT | LOCALTIME | LOCALTIMESTAMP | NOT \n| NULL_P | OFFSET | ON | ONLY | OR | ORDER | PLACING | PRIMARY | \nREFERENCES | RETURNING | SELECT | SESSION_USER | SOME | SYMMETRIC | \nTABLE | THEN | TO | TRAILING | TRUE_P | UNION | UNIQUE | USER | USING | \nVARIADIC | WHEN | WHERE | WINDOW | WITH\n\n// Tokens from postgresql-13.3/src/include/parser/kwlist.h\n\nABORT_P ::= \"abort\"\nABSOLUTE_P ::= \"absolute\"\nACCESS ::= \"access\"\nACTION ::= \"action\"\nADD_P ::= \"add\"\nADMIN ::= \"admin\"\nAFTER ::= \"after\"\nAGGREGATE ::= \"aggregate\"\nALL ::= \"all\"\nALSO ::= \"also\"\nALTER ::= \"alter\"\nALWAYS ::= \"always\"\nANALYSE ::= \"analyse\"\nANALYZE ::= \"analyze\"\nAND ::= \"and\"\nANY ::= \"any\"\nARRAY ::= \"array\"\nAS ::= \"as\"\nASC ::= \"asc\"\nASSERTION ::= \"assertion\"\nASSIGNMENT ::= \"assignment\"\nASYMMETRIC ::= \"asymmetric\"\nAT ::= \"at\"\nATTACH ::= \"attach\"\nATTRIBUTE ::= \"attribute\"\nAUTHORIZATION ::= \"authorization\"\nBACKWARD ::= \"backward\"\nBEFORE ::= \"before\"\nBEGIN_P ::= \"begin\"\nBETWEEN ::= \"between\"\nBIGINT ::= \"bigint\"\nBINARY ::= \"binary\"\nBIT ::= \"bit\"\nBOOLEAN_P ::= \"boolean\"\nBOTH ::= \"both\"\nBY ::= \"by\"\nCACHE ::= \"cache\"\nCALL ::= \"call\"\nCALLED ::= \"called\"\nCASCADE ::= \"cascade\"\nCASCADED ::= \"cascaded\"\nCASE ::= \"case\"\nCAST ::= \"cast\"\nCATALOG_P ::= \"catalog\"\nCHAIN ::= \"chain\"\nCHAR_P ::= \"char\"\nCHARACTER ::= \"character\"\nCHARACTERISTICS ::= \"characteristics\"\nCHECK ::= 
\"check\"\nCHECKPOINT ::= \"checkpoint\"\nCLASS ::= \"class\"\nCLOSE ::= \"close\"\nCLUSTER ::= \"cluster\"\nCOALESCE ::= \"coalesce\"\nCOLLATE ::= \"collate\"\nCOLLATION ::= \"collation\"\nCOLUMN ::= \"column\"\nCOLUMNS ::= \"columns\"\nCOMMENT ::= \"comment\"\nCOMMENTS ::= \"comments\"\nCOMMIT ::= \"commit\"\nCOMMITTED ::= \"committed\"\nCONCURRENTLY ::= \"concurrently\"\nCONFIGURATION ::= \"configuration\"\nCONFLICT ::= \"conflict\"\nCONNECTION ::= \"connection\"\nCONSTRAINT ::= \"constraint\"\nCONSTRAINTS ::= \"constraints\"\nCONTENT_P ::= \"content\"\nCONTINUE_P ::= \"continue\"\nCONVERSION_P ::= \"conversion\"\nCOPY ::= \"copy\"\nCOST ::= \"cost\"\nCREATE ::= \"create\"\nCROSS ::= \"cross\"\nCSV ::= \"csv\"\nCUBE ::= \"cube\"\nCURRENT_P ::= \"current\"\nCURRENT_CATALOG ::= \"current_catalog\"\nCURRENT_DATE ::= \"current_date\"\nCURRENT_ROLE ::= \"current_role\"\nCURRENT_SCHEMA ::= \"current_schema\"\nCURRENT_TIME ::= \"current_time\"\nCURRENT_TIMESTAMP ::= \"current_timestamp\"\nCURRENT_USER ::= \"current_user\"\nCURSOR ::= \"cursor\"\nCYCLE ::= \"cycle\"\nDATA_P ::= \"data\"\nDATABASE ::= \"database\"\nDAY_P ::= \"day\"\nDEALLOCATE ::= \"deallocate\"\nDEC ::= \"dec\"\nDECIMAL_P ::= \"decimal\"\nDECLARE ::= \"declare\"\nDEFAULT ::= \"default\"\nDEFAULTS ::= \"defaults\"\nDEFERRABLE ::= \"deferrable\"\nDEFERRED ::= \"deferred\"\nDEFINER ::= \"definer\"\nDELETE_P ::= \"delete\"\nDELIMITER ::= \"delimiter\"\nDELIMITERS ::= \"delimiters\"\nDEPENDS ::= \"depends\"\nDESC ::= \"desc\"\nDETACH ::= \"detach\"\nDICTIONARY ::= \"dictionary\"\nDISABLE_P ::= \"disable\"\nDISCARD ::= \"discard\"\nDISTINCT ::= \"distinct\"\nDO ::= \"do\"\nDOCUMENT_P ::= \"document\"\nDOMAIN_P ::= \"domain\"\nDOUBLE_P ::= \"double\"\nDROP ::= \"drop\"\nEACH ::= \"each\"\nELSE ::= \"else\"\nENABLE_P ::= \"enable\"\nENCODING ::= \"encoding\"\nENCRYPTED ::= \"encrypted\"\nEND_P ::= \"end\"\nENUM_P ::= \"enum\"\nESCAPE ::= \"escape\"\nEVENT ::= \"event\"\nEXCEPT ::= \"except\"\nEXCLUDE ::= 
\"exclude\"\nEXCLUDING ::= \"excluding\"\nEXCLUSIVE ::= \"exclusive\"\nEXECUTE ::= \"execute\"\nEXISTS ::= \"exists\"\nEXPLAIN ::= \"explain\"\nEXPRESSION ::= \"expression\"\nEXTENSION ::= \"extension\"\nEXTERNAL ::= \"external\"\nEXTRACT ::= \"extract\"\nFALSE_P ::= \"false\"\nFAMILY ::= \"family\"\nFETCH ::= \"fetch\"\nFILTER ::= \"filter\"\nFIRST_P ::= \"first\"\nFLOAT_P ::= \"float\"\nFOLLOWING ::= \"following\"\nFOR ::= \"for\"\nFORCE ::= \"force\"\nFOREIGN ::= \"foreign\"\nFORWARD ::= \"forward\"\nFREEZE ::= \"freeze\"\nFROM ::= \"from\"\nFULL ::= \"full\"\nFUNCTION ::= \"function\"\nFUNCTIONS ::= \"functions\"\nGENERATED ::= \"generated\"\nGLOBAL ::= \"global\"\nGRANT ::= \"grant\"\nGRANTED ::= \"granted\"\nGREATEST ::= \"greatest\"\nGROUP_P ::= \"group\"\nGROUPING ::= \"grouping\"\nGROUPS ::= \"groups\"\nHANDLER ::= \"handler\"\nHAVING ::= \"having\"\nHEADER_P ::= \"header\"\nHOLD ::= \"hold\"\nHOUR_P ::= \"hour\"\nIDENTITY_P ::= \"identity\"\nIF_P ::= \"if\"\nILIKE ::= \"ilike\"\nIMMEDIATE ::= \"immediate\"\nIMMUTABLE ::= \"immutable\"\nIMPLICIT_P ::= \"implicit\"\nIMPORT_P ::= \"import\"\nIN_P ::= \"in\"\nINCLUDE ::= \"include\"\nINCLUDING ::= \"including\"\nINCREMENT ::= \"increment\"\nINDEX ::= \"index\"\nINDEXES ::= \"indexes\"\nINHERIT ::= \"inherit\"\nINHERITS ::= \"inherits\"\nINITIALLY ::= \"initially\"\nINLINE_P ::= \"inline\"\nINNER_P ::= \"inner\"\nINOUT ::= \"inout\"\nINPUT_P ::= \"input\"\nINSENSITIVE ::= \"insensitive\"\nINSERT ::= \"insert\"\nINSTEAD ::= \"instead\"\nINT_P ::= \"int\"\nINTEGER ::= \"integer\"\nINTERSECT ::= \"intersect\"\nINTERVAL ::= \"interval\"\nINTO ::= \"into\"\nINVOKER ::= \"invoker\"\nIS ::= \"is\"\nISNULL ::= \"isnull\"\nISOLATION ::= \"isolation\"\nJOIN ::= \"join\"\nKEY ::= \"key\"\nLABEL ::= \"label\"\nLANGUAGE ::= \"language\"\nLARGE_P ::= \"large\"\nLAST_P ::= \"last\"\nLATERAL_P ::= \"lateral\"\nLEADING ::= \"leading\"\nLEAKPROOF ::= \"leakproof\"\nLEAST ::= \"least\"\nLEFT ::= \"left\"\nLEVEL ::= 
\"level\"\nLIKE ::= \"like\"\nLIMIT ::= \"limit\"\nLISTEN ::= \"listen\"\nLOAD ::= \"load\"\nLOCAL ::= \"local\"\nLOCALTIME ::= \"localtime\"\nLOCALTIMESTAMP ::= \"localtimestamp\"\nLOCATION ::= \"location\"\nLOCK_P ::= \"lock\"\nLOCKED ::= \"locked\"\nLOGGED ::= \"logged\"\nMAPPING ::= \"mapping\"\nMATCH ::= \"match\"\nMATERIALIZED ::= \"materialized\"\nMAXVALUE ::= \"maxvalue\"\nMETHOD ::= \"method\"\nMINUTE_P ::= \"minute\"\nMINVALUE ::= \"minvalue\"\nMODE ::= \"mode\"\nMONTH_P ::= \"month\"\nMOVE ::= \"move\"\nNAME_P ::= \"name\"\nNAMES ::= \"names\"\nNATIONAL ::= \"national\"\nNATURAL ::= \"natural\"\nNCHAR ::= \"nchar\"\nNEW ::= \"new\"\nNEXT ::= \"next\"\nNFC ::= \"nfc\"\nNFD ::= \"nfd\"\nNFKC ::= \"nfkc\"\nNFKD ::= \"nfkd\"\nNO ::= \"no\"\nNONE ::= \"none\"\nNORMALIZE ::= \"normalize\"\nNORMALIZED ::= \"normalized\"\nNOT ::= \"not\"\nNOTHING ::= \"nothing\"\nNOTIFY ::= \"notify\"\nNOTNULL ::= \"notnull\"\nNOWAIT ::= \"nowait\"\nNULL_P ::= \"null\"\nNULLIF ::= \"nullif\"\nNULLS_P ::= \"nulls\"\nNUMERIC ::= \"numeric\"\nOBJECT_P ::= \"object\"\nOF ::= \"of\"\nOFF ::= \"off\"\nOFFSET ::= \"offset\"\nOIDS ::= \"oids\"\nOLD ::= \"old\"\nON ::= \"on\"\nONLY ::= \"only\"\nOPERATOR ::= \"operator\"\nOPTION ::= \"option\"\nOPTIONS ::= \"options\"\nOR ::= \"or\"\nORDER ::= \"order\"\nORDINALITY ::= \"ordinality\"\nOTHERS ::= \"others\"\nOUT_P ::= \"out\"\nOUTER_P ::= \"outer\"\nOVER ::= \"over\"\nOVERLAPS ::= \"overlaps\"\nOVERLAY ::= \"overlay\"\nOVERRIDING ::= \"overriding\"\nOWNED ::= \"owned\"\nOWNER ::= \"owner\"\nPARALLEL ::= \"parallel\"\nPARSER ::= \"parser\"\nPARTIAL ::= \"partial\"\nPARTITION ::= \"partition\"\nPASSING ::= \"passing\"\nPASSWORD ::= \"password\"\nPLACING ::= \"placing\"\nPLANS ::= \"plans\"\nPOLICY ::= \"policy\"\nPOSITION ::= \"position\"\nPRECEDING ::= \"preceding\"\nPRECISION ::= \"precision\"\nPREPARE ::= \"prepare\"\nPREPARED ::= \"prepared\"\nPRESERVE ::= \"preserve\"\nPRIMARY ::= \"primary\"\nPRIOR ::= \"prior\"\nPRIVILEGES ::= 
\"privileges\"\nPROCEDURAL ::= \"procedural\"\nPROCEDURE ::= \"procedure\"\nPROCEDURES ::= \"procedures\"\nPROGRAM ::= \"program\"\nPUBLICATION ::= \"publication\"\nQUOTE ::= \"quote\"\nRANGE ::= \"range\"\nREAD ::= \"read\"\nREAL ::= \"real\"\nREASSIGN ::= \"reassign\"\nRECHECK ::= \"recheck\"\nRECURSIVE ::= \"recursive\"\nREF ::= \"ref\"\nREFERENCES ::= \"references\"\nREFERENCING ::= \"referencing\"\nREFRESH ::= \"refresh\"\nREINDEX ::= \"reindex\"\nRELATIVE_P ::= \"relative\"\nRELEASE ::= \"release\"\nRENAME ::= \"rename\"\nREPEATABLE ::= \"repeatable\"\nREPLACE ::= \"replace\"\nREPLICA ::= \"replica\"\nRESET ::= \"reset\"\nRESTART ::= \"restart\"\nRESTRICT ::= \"restrict\"\nRETURNING ::= \"returning\"\nRETURNS ::= \"returns\"\nREVOKE ::= \"revoke\"\nRIGHT ::= \"right\"\nROLE ::= \"role\"\nROLLBACK ::= \"rollback\"\nROLLUP ::= \"rollup\"\nROUTINE ::= \"routine\"\nROUTINES ::= \"routines\"\nROW ::= \"row\"\nROWS ::= \"rows\"\nRULE ::= \"rule\"\nSAVEPOINT ::= \"savepoint\"\nSCHEMA ::= \"schema\"\nSCHEMAS ::= \"schemas\"\nSCROLL ::= \"scroll\"\nSEARCH ::= \"search\"\nSECOND_P ::= \"second\"\nSECURITY ::= \"security\"\nSELECT ::= \"select\"\nSEQUENCE ::= \"sequence\"\nSEQUENCES ::= \"sequences\"\nSERIALIZABLE ::= \"serializable\"\nSERVER ::= \"server\"\nSESSION ::= \"session\"\nSESSION_USER ::= \"session_user\"\nSET ::= \"set\"\nSETOF ::= \"setof\"\nSETS ::= \"sets\"\nSHARE ::= \"share\"\nSHOW ::= \"show\"\nSIMILAR ::= \"similar\"\nSIMPLE ::= \"simple\"\nSKIP ::= \"skip\"\nSMALLINT ::= \"smallint\"\nSNAPSHOT ::= \"snapshot\"\nSOME ::= \"some\"\nSQL_P ::= \"sql\"\nSTABLE ::= \"stable\"\nSTANDALONE_P ::= \"standalone\"\nSTART ::= \"start\"\nSTATEMENT ::= \"statement\"\nSTATISTICS ::= \"statistics\"\nSTDIN ::= \"stdin\"\nSTDOUT ::= \"stdout\"\nSTORAGE ::= \"storage\"\nSTORED ::= \"stored\"\nSTRICT_P ::= \"strict\"\nSTRIP_P ::= \"strip\"\nSUBSCRIPTION ::= \"subscription\"\nSUBSTRING ::= \"substring\"\nSUPPORT ::= \"support\"\nSYMMETRIC ::= \"symmetric\"\nSYSID ::= 
\"sysid\"\nSYSTEM_P ::= \"system\"\nTABLE ::= \"table\"\nTABLES ::= \"tables\"\nTABLESAMPLE ::= \"tablesample\"\nTABLESPACE ::= \"tablespace\"\nTEMP ::= \"temp\"\nTEMPLATE ::= \"template\"\nTEMPORARY ::= \"temporary\"\nTEXT_P ::= \"text\"\nTHEN ::= \"then\"\nTIES ::= \"ties\"\nTIME ::= \"time\"\nTIMESTAMP ::= \"timestamp\"\nTO ::= \"to\"\nTRAILING ::= \"trailing\"\nTRANSACTION ::= \"transaction\"\nTRANSFORM ::= \"transform\"\nTREAT ::= \"treat\"\nTRIGGER ::= \"trigger\"\nTRIM ::= \"trim\"\nTRUE_P ::= \"true\"\nTRUNCATE ::= \"truncate\"\nTRUSTED ::= \"trusted\"\nTYPE_P ::= \"type\"\nTYPES_P ::= \"types\"\nUESCAPE ::= \"uescape\"\nUNBOUNDED ::= \"unbounded\"\nUNCOMMITTED ::= \"uncommitted\"\nUNENCRYPTED ::= \"unencrypted\"\nUNION ::= \"union\"\nUNIQUE ::= \"unique\"\nUNKNOWN ::= \"unknown\"\nUNLISTEN ::= \"unlisten\"\nUNLOGGED ::= \"unlogged\"\nUNTIL ::= \"until\"\nUPDATE ::= \"update\"\nUSER ::= \"user\"\nUSING ::= \"using\"\nVACUUM ::= \"vacuum\"\nVALID ::= \"valid\"\nVALIDATE ::= \"validate\"\nVALIDATOR ::= \"validator\"\nVALUE_P ::= \"value\"\nVALUES ::= \"values\"\nVARCHAR ::= \"varchar\"\nVARIADIC ::= \"variadic\"\nVARYING ::= \"varying\"\nVERBOSE ::= \"verbose\"\nVERSION_P ::= \"version\"\nVIEW ::= \"view\"\nVIEWS ::= \"views\"\nVOLATILE ::= \"volatile\"\nWHEN ::= \"when\"\nWHERE ::= \"where\"\nWHITESPACE_P ::= \"whitespace\"\nWINDOW ::= \"window\"\nWITH ::= \"with\"\nWITHIN ::= \"within\"\nWITHOUT ::= \"without\"\nWORK ::= \"work\"\nWRAPPER ::= \"wrapper\"\nWRITE ::= \"write\"\nXML_P ::= \"xml\"\nXMLATTRIBUTES ::= \"xmlattributes\"\nXMLCONCAT ::= \"xmlconcat\"\nXMLELEMENT ::= \"xmlelement\"\nXMLEXISTS ::= \"xmlexists\"\nXMLFOREST ::= \"xmlforest\"\nXMLNAMESPACES ::= \"xmlnamespaces\"\nXMLPARSE ::= \"xmlparse\"\nXMLPI ::= \"xmlpi\"\nXMLROOT ::= \"xmlroot\"\nXMLSERIALIZE ::= \"xmlserialize\"\nXMLTABLE ::= \"xmltable\"\nYEAR_P ::= \"year\"\nYES_P ::= \"yes\"\nZONE ::= \"zone\"\n\n====\n\n\n\n\n\n\n\nI've done a experimental tool to\n convert bison grammars to a 
kind of EBNF understood by https://www.bottlecaps.de/rr/ui to generate railroad\n diagrams see bellow the converted 'postgresql-13.3/src/backend/parser/gram.y' and with some hand\n made changes to allow view it at https://www.bottlecaps.de/rr/ui the order of the rules\n could be changed to a better view of the railroad diagrams. Copy\n and paste the EBNF bellow on https://www.bottlecaps.de/rr/ui tab Edit Grammar then\n switch to the tab View Diagram.\n====\n/*\n From postgresql-13.3/src/backend/parser/gram.y\n */\n\n stmtblock ::= stmtmulti\n stmtmulti ::= stmtmulti ';' stmt | stmt\n stmt ::= AlterEventTrigStmt | AlterCollationStmt |\n AlterDatabaseStmt | AlterDatabaseSetStmt |\n AlterDefaultPrivilegesStmt | AlterDomainStmt | AlterEnumStmt |\n AlterExtensionStmt | AlterExtensionContentsStmt | AlterFdwStmt |\n AlterForeignServerStmt | AlterForeignTableStmt |\n AlterFunctionStmt | AlterGroupStmt | AlterObjectDependsStmt |\n AlterObjectSchemaStmt | AlterOwnerStmt | AlterOperatorStmt |\n AlterTypeStmt | AlterPolicyStmt | AlterSeqStmt | AlterSystemStmt\n | AlterTableStmt | AlterTblSpcStmt | AlterCompositeTypeStmt |\n AlterPublicationStmt | AlterRoleSetStmt | AlterRoleStmt |\n AlterSubscriptionStmt | AlterStatsStmt |\n AlterTSConfigurationStmt | AlterTSDictionaryStmt |\n AlterUserMappingStmt | AnalyzeStmt | CallStmt | CheckPointStmt |\n ClosePortalStmt | ClusterStmt | CommentStmt | ConstraintsSetStmt\n | CopyStmt | CreateAmStmt | CreateAsStmt | CreateAssertionStmt |\n CreateCastStmt | CreateConversionStmt | CreateDomainStmt |\n CreateExtensionStmt | CreateFdwStmt | CreateForeignServerStmt |\n CreateForeignTableStmt | CreateFunctionStmt | CreateGroupStmt |\n CreateMatViewStmt | CreateOpClassStmt | CreateOpFamilyStmt |\n CreatePublicationStmt | AlterOpFamilyStmt | CreatePolicyStmt |\n CreatePLangStmt | CreateSchemaStmt | CreateSeqStmt | CreateStmt\n | CreateSubscriptionStmt | CreateStatsStmt |\n CreateTableSpaceStmt | CreateTransformStmt | CreateTrigStmt |\n 
CreateEventTrigStmt | CreateRoleStmt | CreateUserStmt |\n CreateUserMappingStmt | CreatedbStmt | DeallocateStmt |\n DeclareCursorStmt | DefineStmt | DeleteStmt | DiscardStmt |\n DoStmt | DropCastStmt | DropOpClassStmt | DropOpFamilyStmt |\n DropOwnedStmt | DropPLangStmt | DropStmt | DropSubscriptionStmt\n | DropTableSpaceStmt | DropTransformStmt | DropRoleStmt |\n DropUserMappingStmt | DropdbStmt | ExecuteStmt | ExplainStmt |\n FetchStmt | GrantStmt | GrantRoleStmt | ImportForeignSchemaStmt\n | IndexStmt | InsertStmt | ListenStmt | RefreshMatViewStmt |\n LoadStmt | LockStmt | NotifyStmt | PrepareStmt |\n ReassignOwnedStmt | ReindexStmt | RemoveAggrStmt |\n RemoveFuncStmt | RemoveOperStmt | RenameStmt | RevokeStmt |\n RevokeRoleStmt | RuleStmt | SecLabelStmt | SelectStmt |\n TransactionStmt | TruncateStmt | UnlistenStmt | UpdateStmt |\n VacuumStmt | VariableResetStmt | VariableSetStmt |\n VariableShowStmt | ViewStmt |\n CallStmt ::= CALL func_application\n CreateRoleStmt ::= CREATE ROLE RoleId opt_with OptRoleList\n opt_with ::= WITH | WITH_LA |\n OptRoleList ::= OptRoleList CreateOptRoleElem |\n AlterOptRoleList ::= AlterOptRoleList AlterOptRoleElem |\n AlterOptRoleElem ::= PASSWORD Sconst | PASSWORD NULL_P |\n ENCRYPTED PASSWORD Sconst | UNENCRYPTED PASSWORD Sconst |\n INHERIT | CONNECTION LIMIT SignedIconst | VALID UNTIL Sconst |\n USER role_list | IDENT\n CreateOptRoleElem ::= AlterOptRoleElem | SYSID Iconst | ADMIN\n role_list | ROLE role_list | IN_P ROLE role_list | IN_P GROUP_P\n role_list\n CreateUserStmt ::= CREATE USER RoleId opt_with OptRoleList\n AlterRoleStmt ::= ALTER ROLE RoleSpec opt_with AlterOptRoleList\n | ALTER USER RoleSpec opt_with AlterOptRoleList\n opt_in_database ::= | IN_P DATABASE database_name\n AlterRoleSetStmt ::= ALTER ROLE RoleSpec opt_in_database\n SetResetClause | ALTER ROLE ALL opt_in_database SetResetClause |\n ALTER USER RoleSpec opt_in_database SetResetClause | ALTER USER\n ALL opt_in_database SetResetClause\n DropRoleStmt ::= 
DROP ROLE role_list | DROP ROLE IF_P EXISTS\n role_list | DROP USER role_list | DROP USER IF_P EXISTS\n role_list | DROP GROUP_P role_list | DROP GROUP_P IF_P EXISTS\n role_list\n CreateGroupStmt ::= CREATE GROUP_P RoleId opt_with OptRoleList\n AlterGroupStmt ::= ALTER GROUP_P RoleSpec add_drop USER\n role_list\n add_drop ::= ADD_P | DROP\n CreateSchemaStmt ::= CREATE SCHEMA OptSchemaName AUTHORIZATION\n RoleSpec OptSchemaEltList | CREATE SCHEMA ColId OptSchemaEltList\n | CREATE SCHEMA IF_P NOT EXISTS OptSchemaName AUTHORIZATION\n RoleSpec OptSchemaEltList | CREATE SCHEMA IF_P NOT EXISTS ColId\n OptSchemaEltList\n OptSchemaName ::= ColId |\n OptSchemaEltList ::= OptSchemaEltList schema_stmt |\n schema_stmt ::= CreateStmt | IndexStmt | CreateSeqStmt |\n CreateTrigStmt | GrantStmt | ViewStmt\n VariableSetStmt ::= SET set_rest | SET LOCAL set_rest | SET\n SESSION set_rest\n set_rest ::= TRANSACTION transaction_mode_list | SESSION\n CHARACTERISTICS AS TRANSACTION transaction_mode_list |\n set_rest_more\n generic_set ::= var_name TO var_list | var_name '=' var_list |\n var_name TO DEFAULT | var_name '=' DEFAULT\n set_rest_more ::= generic_set | var_name FROM CURRENT_P | TIME\n ZONE zone_value | CATALOG_P Sconst | SCHEMA Sconst | NAMES\n opt_encoding | ROLE NonReservedWord_or_Sconst | SESSION\n AUTHORIZATION NonReservedWord_or_Sconst | SESSION AUTHORIZATION\n DEFAULT | XML_P OPTION document_or_content | TRANSACTION\n SNAPSHOT Sconst\n var_name ::= ColId | var_name '.' 
ColId\n var_list ::= var_value | var_list ',' var_value\n var_value ::= opt_boolean_or_string | NumericOnly\n iso_level ::= READ UNCOMMITTED | READ COMMITTED | REPEATABLE\n READ | SERIALIZABLE\n opt_boolean_or_string ::= TRUE_P | FALSE_P | ON |\n NonReservedWord_or_Sconst\n zone_value ::= Sconst | IDENT | ConstInterval Sconst\n opt_interval | ConstInterval '(' Iconst ')' Sconst | NumericOnly\n | DEFAULT | LOCAL\n opt_encoding ::= Sconst | DEFAULT |\n NonReservedWord_or_Sconst ::= NonReservedWord | Sconst\n VariableResetStmt ::= RESET reset_rest\n reset_rest ::= generic_reset | TIME ZONE | TRANSACTION ISOLATION\n LEVEL | SESSION AUTHORIZATION\n generic_reset ::= var_name | ALL\n SetResetClause ::= SET set_rest | VariableResetStmt\n FunctionSetResetClause ::= SET set_rest_more | VariableResetStmt\n VariableShowStmt ::= SHOW var_name | SHOW TIME ZONE | SHOW\n TRANSACTION ISOLATION LEVEL | SHOW SESSION AUTHORIZATION | SHOW\n ALL\n ConstraintsSetStmt ::= SET CONSTRAINTS constraints_set_list\n constraints_set_mode\n constraints_set_list ::= ALL | qualified_name_list\n constraints_set_mode ::= DEFERRED | IMMEDIATE\n CheckPointStmt ::= CHECKPOINT\n DiscardStmt ::= DISCARD ALL | DISCARD TEMP | DISCARD TEMPORARY |\n DISCARD PLANS | DISCARD SEQUENCES\n AlterTableStmt ::= ALTER TABLE relation_expr alter_table_cmds |\n ALTER TABLE IF_P EXISTS relation_expr alter_table_cmds | ALTER\n TABLE relation_expr partition_cmd | ALTER TABLE IF_P EXISTS\n relation_expr partition_cmd | ALTER TABLE ALL IN_P TABLESPACE\n name SET TABLESPACE name opt_nowait | ALTER TABLE ALL IN_P\n TABLESPACE name OWNED BY role_list SET TABLESPACE name\n opt_nowait | ALTER INDEX qualified_name alter_table_cmds | ALTER\n INDEX IF_P EXISTS qualified_name alter_table_cmds | ALTER INDEX\n qualified_name index_partition_cmd | ALTER INDEX ALL IN_P\n TABLESPACE name SET TABLESPACE name opt_nowait | ALTER INDEX ALL\n IN_P TABLESPACE name OWNED BY role_list SET TABLESPACE name\n opt_nowait | ALTER SEQUENCE 
qualified_name alter_table_cmds |\n ALTER SEQUENCE IF_P EXISTS qualified_name alter_table_cmds |\n ALTER VIEW qualified_name alter_table_cmds | ALTER VIEW IF_P\n EXISTS qualified_name alter_table_cmds | ALTER MATERIALIZED VIEW\n qualified_name alter_table_cmds | ALTER MATERIALIZED VIEW IF_P\n EXISTS qualified_name alter_table_cmds | ALTER MATERIALIZED VIEW\n ALL IN_P TABLESPACE name SET TABLESPACE name opt_nowait | ALTER\n MATERIALIZED VIEW ALL IN_P TABLESPACE name OWNED BY role_list\n SET TABLESPACE name opt_nowait\n alter_table_cmds ::= alter_table_cmd | alter_table_cmds ','\n alter_table_cmd\n partition_cmd ::= ATTACH PARTITION qualified_name\n PartitionBoundSpec | DETACH PARTITION qualified_name\n index_partition_cmd ::= ATTACH PARTITION qualified_name\n alter_table_cmd ::= ADD_P columnDef | ADD_P IF_P NOT EXISTS\n columnDef | ADD_P COLUMN columnDef | ADD_P COLUMN IF_P NOT\n EXISTS columnDef | ALTER opt_column ColId alter_column_default |\n ALTER opt_column ColId DROP NOT NULL_P | ALTER opt_column ColId\n SET NOT NULL_P | ALTER opt_column ColId DROP EXPRESSION | ALTER\n opt_column ColId DROP EXPRESSION IF_P EXISTS | ALTER opt_column\n ColId SET STATISTICS SignedIconst | ALTER opt_column Iconst SET\n STATISTICS SignedIconst | ALTER opt_column ColId SET reloptions\n | ALTER opt_column ColId RESET reloptions | ALTER opt_column\n ColId SET STORAGE ColId | ALTER opt_column ColId ADD_P GENERATED\n generated_when AS IDENTITY_P OptParenthesizedSeqOptList | ALTER\n opt_column ColId alter_identity_column_option_list | ALTER\n opt_column ColId DROP IDENTITY_P | ALTER opt_column ColId DROP\n IDENTITY_P IF_P EXISTS | DROP opt_column IF_P EXISTS ColId\n opt_drop_behavior | DROP opt_column ColId opt_drop_behavior |\n ALTER opt_column ColId opt_set_data TYPE_P Typename\n opt_collate_clause alter_using | ALTER opt_column ColId\n alter_generic_options | ADD_P TableConstraint | ALTER CONSTRAINT\n name ConstraintAttributeSpec | VALIDATE CONSTRAINT name | DROP\n CONSTRAINT IF_P 
EXISTS name opt_drop_behavior | DROP CONSTRAINT\n name opt_drop_behavior | SET WITHOUT OIDS | CLUSTER ON name |\n SET WITHOUT CLUSTER | SET LOGGED | SET UNLOGGED | ENABLE_P\n TRIGGER name | ENABLE_P ALWAYS TRIGGER name | ENABLE_P REPLICA\n TRIGGER name | ENABLE_P TRIGGER ALL | ENABLE_P TRIGGER USER |\n DISABLE_P TRIGGER name | DISABLE_P TRIGGER ALL | DISABLE_P\n TRIGGER USER | ENABLE_P RULE name | ENABLE_P ALWAYS RULE name |\n ENABLE_P REPLICA RULE name | DISABLE_P RULE name | INHERIT\n qualified_name | NO INHERIT qualified_name | OF any_name | NOT\n OF | OWNER TO RoleSpec | SET TABLESPACE name | SET reloptions |\n RESET reloptions | REPLICA IDENTITY_P replica_identity |\n ENABLE_P ROW LEVEL SECURITY | DISABLE_P ROW LEVEL SECURITY |\n FORCE ROW LEVEL SECURITY | NO FORCE ROW LEVEL SECURITY |\n alter_generic_options\n alter_column_default ::= SET DEFAULT a_expr | DROP DEFAULT\n opt_drop_behavior ::= CASCADE | RESTRICT |\n opt_collate_clause ::= COLLATE any_name |\n alter_using ::= USING a_expr |\n replica_identity ::= NOTHING | FULL | DEFAULT | USING INDEX name\n reloptions ::= '(' reloption_list ')'\n opt_reloptions ::= WITH reloptions |\n reloption_list ::= reloption_elem | reloption_list ','\n reloption_elem\n reloption_elem ::= ColLabel '=' def_arg | ColLabel | ColLabel\n '.' ColLabel '=' def_arg | ColLabel '.' 
ColLabel\n alter_identity_column_option_list ::=\n alter_identity_column_option | alter_identity_column_option_list\n alter_identity_column_option\n alter_identity_column_option ::= RESTART | RESTART opt_with\n NumericOnly | SET SeqOptElem | SET GENERATED generated_when\n PartitionBoundSpec ::= FOR VALUES WITH '(' hash_partbound ')' |\n FOR VALUES IN_P '(' expr_list ')' | FOR VALUES FROM '('\n expr_list ')' TO '(' expr_list ')' | DEFAULT\n hash_partbound_elem ::= NonReservedWord Iconst\n hash_partbound ::= hash_partbound_elem | hash_partbound ','\n hash_partbound_elem\n AlterCompositeTypeStmt ::= ALTER TYPE_P any_name alter_type_cmds\n alter_type_cmds ::= alter_type_cmd | alter_type_cmds ','\n alter_type_cmd\n alter_type_cmd ::= ADD_P ATTRIBUTE TableFuncElement\n opt_drop_behavior | DROP ATTRIBUTE IF_P EXISTS ColId\n opt_drop_behavior | DROP ATTRIBUTE ColId opt_drop_behavior |\n ALTER ATTRIBUTE ColId opt_set_data TYPE_P Typename\n opt_collate_clause opt_drop_behavior\n ClosePortalStmt ::= CLOSE cursor_name | CLOSE ALL\n CopyStmt ::= COPY opt_binary qualified_name opt_column_list\n copy_from opt_program copy_file_name copy_delimiter opt_with\n copy_options where_clause | COPY '(' PreparableStmt ')' TO\n opt_program copy_file_name opt_with copy_options\n copy_from ::= FROM | TO\n opt_program ::= PROGRAM |\n copy_file_name ::= Sconst | STDIN | STDOUT\n copy_options ::= copy_opt_list | '(' copy_generic_opt_list ')'\n copy_opt_list ::= copy_opt_list copy_opt_item |\n copy_opt_item ::= BINARY | FREEZE | DELIMITER opt_as Sconst |\n NULL_P opt_as Sconst | CSV | HEADER_P | QUOTE opt_as Sconst |\n ESCAPE opt_as Sconst | FORCE QUOTE columnList | FORCE QUOTE '*'\n | FORCE NOT NULL_P columnList | FORCE NULL_P columnList |\n ENCODING Sconst\n opt_binary ::= BINARY |\n copy_delimiter ::= opt_using DELIMITERS Sconst |\n opt_using ::= USING |\n copy_generic_opt_list ::= copy_generic_opt_elem |\n copy_generic_opt_list ',' copy_generic_opt_elem\n copy_generic_opt_elem ::= ColLabel 
copy_generic_opt_arg\n copy_generic_opt_arg ::= opt_boolean_or_string | NumericOnly |\n '*' | '(' copy_generic_opt_arg_list ')' |\n copy_generic_opt_arg_list ::= copy_generic_opt_arg_list_item |\n copy_generic_opt_arg_list ',' copy_generic_opt_arg_list_item\n copy_generic_opt_arg_list_item ::= opt_boolean_or_string\n CreateStmt ::= CREATE OptTemp TABLE qualified_name '('\n OptTableElementList ')' OptInherit OptPartitionSpec\n table_access_method_clause OptWith OnCommitOption OptTableSpace\n | CREATE OptTemp TABLE IF_P NOT EXISTS qualified_name '('\n OptTableElementList ')' OptInherit OptPartitionSpec\n table_access_method_clause OptWith OnCommitOption OptTableSpace\n | CREATE OptTemp TABLE qualified_name OF any_name\n OptTypedTableElementList OptPartitionSpec\n table_access_method_clause OptWith OnCommitOption OptTableSpace\n | CREATE OptTemp TABLE IF_P NOT EXISTS qualified_name OF\n any_name OptTypedTableElementList OptPartitionSpec\n table_access_method_clause OptWith OnCommitOption OptTableSpace\n | CREATE OptTemp TABLE qualified_name PARTITION OF\n qualified_name OptTypedTableElementList PartitionBoundSpec\n OptPartitionSpec table_access_method_clause OptWith\n OnCommitOption OptTableSpace | CREATE OptTemp TABLE IF_P NOT\n EXISTS qualified_name PARTITION OF qualified_name\n OptTypedTableElementList PartitionBoundSpec OptPartitionSpec\n table_access_method_clause OptWith OnCommitOption OptTableSpace\n OptTemp ::= TEMPORARY | TEMP | LOCAL TEMPORARY | LOCAL TEMP |\n GLOBAL TEMPORARY | GLOBAL TEMP | UNLOGGED |\n OptTableElementList ::= TableElementList |\n OptTypedTableElementList ::= '(' TypedTableElementList ')' |\n TableElementList ::= TableElement | TableElementList ','\n TableElement\n TypedTableElementList ::= TypedTableElement |\n TypedTableElementList ',' TypedTableElement\n TableElement ::= columnDef | TableLikeClause | TableConstraint\n TypedTableElement ::= columnOptions | TableConstraint\n columnDef ::= ColId Typename create_generic_options 
ColQualList\n columnOptions ::= ColId ColQualList | ColId WITH OPTIONS\n ColQualList\n ColQualList ::= ColQualList ColConstraint |\n ColConstraint ::= CONSTRAINT name ColConstraintElem |\n ColConstraintElem | ConstraintAttr | COLLATE any_name\n ColConstraintElem ::= NOT NULL_P | NULL_P | UNIQUE\n opt_definition OptConsTableSpace | PRIMARY KEY opt_definition\n OptConsTableSpace | CHECK '(' a_expr ')' opt_no_inherit |\n DEFAULT b_expr | GENERATED generated_when AS IDENTITY_P\n OptParenthesizedSeqOptList | GENERATED generated_when AS '('\n a_expr ')' STORED | REFERENCES qualified_name opt_column_list\n key_match key_actions\n generated_when ::= ALWAYS | BY DEFAULT\n ConstraintAttr ::= DEFERRABLE | NOT DEFERRABLE | INITIALLY\n DEFERRED | INITIALLY IMMEDIATE\n TableLikeClause ::= LIKE qualified_name TableLikeOptionList\n TableLikeOptionList ::= TableLikeOptionList INCLUDING\n TableLikeOption | TableLikeOptionList EXCLUDING TableLikeOption\n |\n TableLikeOption ::= COMMENTS | CONSTRAINTS | DEFAULTS |\n IDENTITY_P | GENERATED | INDEXES | STATISTICS | STORAGE | ALL\n TableConstraint ::= CONSTRAINT name ConstraintElem |\n ConstraintElem\n ConstraintElem ::= CHECK '(' a_expr ')' ConstraintAttributeSpec\n | UNIQUE '(' columnList ')' opt_c_include opt_definition\n OptConsTableSpace ConstraintAttributeSpec | UNIQUE ExistingIndex\n ConstraintAttributeSpec | PRIMARY KEY '(' columnList ')'\n opt_c_include opt_definition OptConsTableSpace\n ConstraintAttributeSpec | PRIMARY KEY ExistingIndex\n ConstraintAttributeSpec | EXCLUDE access_method_clause '('\n ExclusionConstraintList ')' opt_c_include opt_definition\n OptConsTableSpace ExclusionWhereClause ConstraintAttributeSpec |\n FOREIGN KEY '(' columnList ')' REFERENCES qualified_name\n opt_column_list key_match key_actions ConstraintAttributeSpec\n opt_no_inherit ::= NO INHERIT |\n opt_column_list ::= '(' columnList ')' |\n columnList ::= columnElem | columnList ',' columnElem\n columnElem ::= ColId\n opt_c_include ::= INCLUDE '(' 
columnList ')' |\n key_match ::= MATCH FULL | MATCH PARTIAL | MATCH SIMPLE |\n ExclusionConstraintList ::= ExclusionConstraintElem |\n ExclusionConstraintList ',' ExclusionConstraintElem\n ExclusionConstraintElem ::= index_elem WITH any_operator |\n index_elem WITH OPERATOR '(' any_operator ')'\n ExclusionWhereClause ::= WHERE '(' a_expr ')' |\n key_actions ::= key_update | key_delete | key_update key_delete\n | key_delete key_update |\n key_update ::= ON UPDATE key_action\n key_delete ::= ON DELETE_P key_action\n key_action ::= NO ACTION | RESTRICT | CASCADE | SET NULL_P | SET\n DEFAULT\n OptInherit ::= INHERITS '(' qualified_name_list ')' |\n OptPartitionSpec ::= PartitionSpec |\n PartitionSpec ::= PARTITION BY ColId '(' part_params ')'\n part_params ::= part_elem | part_params ',' part_elem\n part_elem ::= ColId opt_collate opt_class | func_expr_windowless\n opt_collate opt_class | '(' a_expr ')' opt_collate opt_class\n table_access_method_clause ::= USING access_method |\n OptWith ::= WITH reloptions | WITHOUT OIDS |\n OnCommitOption ::= ON COMMIT DROP | ON COMMIT DELETE_P ROWS | ON\n COMMIT PRESERVE ROWS |\n OptTableSpace ::= TABLESPACE name |\n OptConsTableSpace ::= USING INDEX TABLESPACE name |\n ExistingIndex ::= USING INDEX index_name\n CreateStatsStmt ::= CREATE STATISTICS any_name opt_name_list ON\n expr_list FROM from_list | CREATE STATISTICS IF_P NOT EXISTS\n any_name opt_name_list ON expr_list FROM from_list\n AlterStatsStmt ::= ALTER STATISTICS any_name SET STATISTICS\n SignedIconst | ALTER STATISTICS IF_P EXISTS any_name SET\n STATISTICS SignedIconst\n CreateAsStmt ::= CREATE OptTemp TABLE create_as_target AS\n SelectStmt opt_with_data | CREATE OptTemp TABLE IF_P NOT EXISTS\n create_as_target AS SelectStmt opt_with_data\n create_as_target ::= qualified_name opt_column_list\n table_access_method_clause OptWith OnCommitOption OptTableSpace\n opt_with_data ::= WITH DATA_P | WITH NO DATA_P |\n CreateMatViewStmt ::= CREATE OptNoLog MATERIALIZED VIEW\n 
create_mv_target AS SelectStmt opt_with_data | CREATE OptNoLog\n MATERIALIZED VIEW IF_P NOT EXISTS create_mv_target AS SelectStmt\n opt_with_data\n create_mv_target ::= qualified_name opt_column_list\n table_access_method_clause opt_reloptions OptTableSpace\n OptNoLog ::= UNLOGGED |\n RefreshMatViewStmt ::= REFRESH MATERIALIZED VIEW\n opt_concurrently qualified_name opt_with_data\n CreateSeqStmt ::= CREATE OptTemp SEQUENCE qualified_name\n OptSeqOptList | CREATE OptTemp SEQUENCE IF_P NOT EXISTS\n qualified_name OptSeqOptList\n AlterSeqStmt ::= ALTER SEQUENCE qualified_name SeqOptList |\n ALTER SEQUENCE IF_P EXISTS qualified_name SeqOptList\n OptSeqOptList ::= SeqOptList |\n OptParenthesizedSeqOptList ::= '(' SeqOptList ')' |\n SeqOptList ::= SeqOptElem | SeqOptList SeqOptElem\n SeqOptElem ::= AS SimpleTypename | CACHE NumericOnly | CYCLE |\n NO CYCLE | INCREMENT opt_by NumericOnly | MAXVALUE NumericOnly |\n MINVALUE NumericOnly | NO MAXVALUE | NO MINVALUE | OWNED BY\n any_name | SEQUENCE NAME_P any_name | START opt_with NumericOnly\n | RESTART | RESTART opt_with NumericOnly\n opt_by ::= BY |\n NumericOnly ::= FCONST | '+' FCONST | '-' FCONST | SignedIconst\n NumericOnly_list ::= NumericOnly | NumericOnly_list ','\n NumericOnly\n CreatePLangStmt ::= CREATE opt_or_replace opt_trusted\n opt_procedural LANGUAGE NonReservedWord_or_Sconst | CREATE\n opt_or_replace opt_trusted opt_procedural LANGUAGE\n NonReservedWord_or_Sconst HANDLER handler_name\n opt_inline_handler opt_validator\n opt_trusted ::= TRUSTED |\n handler_name ::= name | name attrs\n opt_inline_handler ::= INLINE_P handler_name |\n validator_clause ::= VALIDATOR handler_name | NO VALIDATOR\n opt_validator ::= validator_clause |\n DropPLangStmt ::= DROP opt_procedural LANGUAGE\n NonReservedWord_or_Sconst opt_drop_behavior | DROP\n opt_procedural LANGUAGE IF_P EXISTS NonReservedWord_or_Sconst\n opt_drop_behavior\n opt_procedural ::= PROCEDURAL |\n CreateTableSpaceStmt ::= CREATE TABLESPACE name\n 
OptTableSpaceOwner LOCATION Sconst opt_reloptions\n OptTableSpaceOwner ::= OWNER RoleSpec |\n DropTableSpaceStmt ::= DROP TABLESPACE name | DROP TABLESPACE\n IF_P EXISTS name\n CreateExtensionStmt ::= CREATE EXTENSION name opt_with\n create_extension_opt_list | CREATE EXTENSION IF_P NOT EXISTS\n name opt_with create_extension_opt_list\n create_extension_opt_list ::= create_extension_opt_list\n create_extension_opt_item |\n create_extension_opt_item ::= SCHEMA name | VERSION_P\n NonReservedWord_or_Sconst | FROM NonReservedWord_or_Sconst |\n CASCADE\n AlterExtensionStmt ::= ALTER EXTENSION name UPDATE\n alter_extension_opt_list\n alter_extension_opt_list ::= alter_extension_opt_list\n alter_extension_opt_item |\n alter_extension_opt_item ::= TO NonReservedWord_or_Sconst\n AlterExtensionContentsStmt ::= ALTER EXTENSION name add_drop\n ACCESS METHOD name | ALTER EXTENSION name add_drop AGGREGATE\n aggregate_with_argtypes | ALTER EXTENSION name add_drop CAST '('\n Typename AS Typename ')' | ALTER EXTENSION name add_drop\n COLLATION any_name | ALTER EXTENSION name add_drop CONVERSION_P\n any_name | ALTER EXTENSION name add_drop DOMAIN_P Typename |\n ALTER EXTENSION name add_drop FUNCTION function_with_argtypes |\n ALTER EXTENSION name add_drop opt_procedural LANGUAGE name |\n ALTER EXTENSION name add_drop OPERATOR operator_with_argtypes |\n ALTER EXTENSION name add_drop OPERATOR CLASS any_name USING\n access_method | ALTER EXTENSION name add_drop OPERATOR FAMILY\n any_name USING access_method | ALTER EXTENSION name add_drop\n PROCEDURE function_with_argtypes | ALTER EXTENSION name add_drop\n ROUTINE function_with_argtypes | ALTER EXTENSION name add_drop\n SCHEMA name | ALTER EXTENSION name add_drop EVENT TRIGGER name |\n ALTER EXTENSION name add_drop TABLE any_name | ALTER EXTENSION\n name add_drop TEXT_P SEARCH PARSER any_name | ALTER EXTENSION\n name add_drop TEXT_P SEARCH DICTIONARY any_name | ALTER\n EXTENSION name add_drop TEXT_P SEARCH TEMPLATE any_name | ALTER\n 
EXTENSION name add_drop TEXT_P SEARCH CONFIGURATION any_name |\n ALTER EXTENSION name add_drop SEQUENCE any_name | ALTER\n EXTENSION name add_drop VIEW any_name | ALTER EXTENSION name\n add_drop MATERIALIZED VIEW any_name | ALTER EXTENSION name\n add_drop FOREIGN TABLE any_name | ALTER EXTENSION name add_drop\n FOREIGN DATA_P WRAPPER name | ALTER EXTENSION name add_drop\n SERVER name | ALTER EXTENSION name add_drop TRANSFORM FOR\n Typename LANGUAGE name | ALTER EXTENSION name add_drop TYPE_P\n Typename\n CreateFdwStmt ::= CREATE FOREIGN DATA_P WRAPPER name\n opt_fdw_options create_generic_options\n fdw_option ::= HANDLER handler_name | NO HANDLER | VALIDATOR\n handler_name | NO VALIDATOR\n fdw_options ::= fdw_option | fdw_options fdw_option\n opt_fdw_options ::= fdw_options |\n AlterFdwStmt ::= ALTER FOREIGN DATA_P WRAPPER name\n opt_fdw_options alter_generic_options | ALTER FOREIGN DATA_P\n WRAPPER name fdw_options\n create_generic_options ::= OPTIONS '(' generic_option_list ')' |\n generic_option_list ::= generic_option_elem |\n generic_option_list ',' generic_option_elem\n alter_generic_options ::= OPTIONS '(' alter_generic_option_list\n ')'\n alter_generic_option_list ::= alter_generic_option_elem |\n alter_generic_option_list ',' alter_generic_option_elem\n alter_generic_option_elem ::= generic_option_elem | SET\n generic_option_elem | ADD_P generic_option_elem | DROP\n generic_option_name\n generic_option_elem ::= generic_option_name generic_option_arg\n generic_option_name ::= ColLabel\n generic_option_arg ::= Sconst\n CreateForeignServerStmt ::= CREATE SERVER name opt_type\n opt_foreign_server_version FOREIGN DATA_P WRAPPER name\n create_generic_options | CREATE SERVER IF_P NOT EXISTS name\n opt_type opt_foreign_server_version FOREIGN DATA_P WRAPPER name\n create_generic_options\n opt_type ::= TYPE_P Sconst |\n foreign_server_version ::= VERSION_P Sconst | VERSION_P NULL_P\n opt_foreign_server_version ::= foreign_server_version |\n AlterForeignServerStmt 
::= ALTER SERVER name\n foreign_server_version alter_generic_options | ALTER SERVER name\n foreign_server_version | ALTER SERVER name alter_generic_options\n CreateForeignTableStmt ::= CREATE FOREIGN TABLE qualified_name\n '(' OptTableElementList ')' OptInherit SERVER name\n create_generic_options | CREATE FOREIGN TABLE IF_P NOT EXISTS\n qualified_name '(' OptTableElementList ')' OptInherit SERVER\n name create_generic_options | CREATE FOREIGN TABLE\n qualified_name PARTITION OF qualified_name\n OptTypedTableElementList PartitionBoundSpec SERVER name\n create_generic_options | CREATE FOREIGN TABLE IF_P NOT EXISTS\n qualified_name PARTITION OF qualified_name\n OptTypedTableElementList PartitionBoundSpec SERVER name\n create_generic_options\n AlterForeignTableStmt ::= ALTER FOREIGN TABLE relation_expr\n alter_table_cmds | ALTER FOREIGN TABLE IF_P EXISTS relation_expr\n alter_table_cmds\n ImportForeignSchemaStmt ::= IMPORT_P FOREIGN SCHEMA name\n import_qualification FROM SERVER name INTO name\n create_generic_options\n import_qualification_type ::= LIMIT TO | EXCEPT\n import_qualification ::= import_qualification_type '('\n relation_expr_list ')' |\n CreateUserMappingStmt ::= CREATE USER MAPPING FOR auth_ident\n SERVER name create_generic_options | CREATE USER MAPPING IF_P\n NOT EXISTS FOR auth_ident SERVER name create_generic_options\n auth_ident ::= RoleSpec | USER\n DropUserMappingStmt ::= DROP USER MAPPING FOR auth_ident SERVER\n name | DROP USER MAPPING IF_P EXISTS FOR auth_ident SERVER name\n AlterUserMappingStmt ::= ALTER USER MAPPING FOR auth_ident\n SERVER name alter_generic_options\n CreatePolicyStmt ::= CREATE POLICY name ON qualified_name\n RowSecurityDefaultPermissive RowSecurityDefaultForCmd\n RowSecurityDefaultToRole RowSecurityOptionalExpr\n RowSecurityOptionalWithCheck\n AlterPolicyStmt ::= ALTER POLICY name ON qualified_name\n RowSecurityOptionalToRole RowSecurityOptionalExpr\n RowSecurityOptionalWithCheck\n RowSecurityOptionalExpr ::= USING '(' 
a_expr ')' |\n RowSecurityOptionalWithCheck ::= WITH CHECK '(' a_expr ')' |\n RowSecurityDefaultToRole ::= TO role_list |\n RowSecurityOptionalToRole ::= TO role_list |\n RowSecurityDefaultPermissive ::= AS IDENT |\n RowSecurityDefaultForCmd ::= FOR row_security_cmd |\n row_security_cmd ::= ALL | SELECT | INSERT | UPDATE | DELETE_P\n CreateAmStmt ::= CREATE ACCESS METHOD name TYPE_P am_type\n HANDLER handler_name\n am_type ::= INDEX | TABLE\n CreateTrigStmt ::= CREATE TRIGGER name TriggerActionTime\n TriggerEvents ON qualified_name TriggerReferencing\n TriggerForSpec TriggerWhen EXECUTE FUNCTION_or_PROCEDURE\n func_name '(' TriggerFuncArgs ')' | CREATE CONSTRAINT TRIGGER\n name AFTER TriggerEvents ON qualified_name OptConstrFromTable\n ConstraintAttributeSpec FOR EACH ROW TriggerWhen EXECUTE\n FUNCTION_or_PROCEDURE func_name '(' TriggerFuncArgs ')'\n TriggerActionTime ::= BEFORE | AFTER | INSTEAD OF\n TriggerEvents ::= TriggerOneEvent | TriggerEvents OR\n TriggerOneEvent\n TriggerOneEvent ::= INSERT | DELETE_P | UPDATE | UPDATE OF\n columnList | TRUNCATE\n TriggerReferencing ::= REFERENCING TriggerTransitions |\n TriggerTransitions ::= TriggerTransition | TriggerTransitions\n TriggerTransition\n TriggerTransition ::= TransitionOldOrNew TransitionRowOrTable\n opt_as TransitionRelName\n TransitionOldOrNew ::= NEW | OLD\n TransitionRowOrTable ::= TABLE | ROW\n TransitionRelName ::= ColId\n TriggerForSpec ::= FOR TriggerForOptEach TriggerForType |\n TriggerForOptEach ::= EACH |\n TriggerForType ::= ROW | STATEMENT\n TriggerWhen ::= WHEN '(' a_expr ')' |\n FUNCTION_or_PROCEDURE ::= FUNCTION | PROCEDURE\n TriggerFuncArgs ::= TriggerFuncArg | TriggerFuncArgs ','\n TriggerFuncArg |\n TriggerFuncArg ::= Iconst | FCONST | Sconst | ColLabel\n OptConstrFromTable ::= FROM qualified_name |\n ConstraintAttributeSpec ::= | ConstraintAttributeSpec\n ConstraintAttributeElem\n ConstraintAttributeElem ::= NOT DEFERRABLE | DEFERRABLE |\n INITIALLY IMMEDIATE | INITIALLY DEFERRED | NOT 
VALID | NO\n INHERIT\n CreateEventTrigStmt ::= CREATE EVENT TRIGGER name ON ColLabel\n EXECUTE FUNCTION_or_PROCEDURE func_name '(' ')' | CREATE EVENT\n TRIGGER name ON ColLabel WHEN event_trigger_when_list EXECUTE\n FUNCTION_or_PROCEDURE func_name '(' ')'\n event_trigger_when_list ::= event_trigger_when_item |\n event_trigger_when_list AND event_trigger_when_item\n event_trigger_when_item ::= ColId IN_P '('\n event_trigger_value_list ')'\n event_trigger_value_list ::= SCONST | event_trigger_value_list\n ',' SCONST\n AlterEventTrigStmt ::= ALTER EVENT TRIGGER name enable_trigger\n enable_trigger ::= ENABLE_P | ENABLE_P REPLICA | ENABLE_P ALWAYS\n | DISABLE_P\n CreateAssertionStmt ::= CREATE ASSERTION any_name CHECK '('\n a_expr ')' ConstraintAttributeSpec\n DefineStmt ::= CREATE opt_or_replace AGGREGATE func_name\n aggr_args definition | CREATE opt_or_replace AGGREGATE func_name\n old_aggr_definition | CREATE OPERATOR any_operator definition |\n CREATE TYPE_P any_name definition | CREATE TYPE_P any_name |\n CREATE TYPE_P any_name AS '(' OptTableFuncElementList ')' |\n CREATE TYPE_P any_name AS ENUM_P '(' opt_enum_val_list ')' |\n CREATE TYPE_P any_name AS RANGE definition | CREATE TEXT_P\n SEARCH PARSER any_name definition | CREATE TEXT_P SEARCH\n DICTIONARY any_name definition | CREATE TEXT_P SEARCH TEMPLATE\n any_name definition | CREATE TEXT_P SEARCH CONFIGURATION\n any_name definition | CREATE COLLATION any_name definition |\n CREATE COLLATION IF_P NOT EXISTS any_name definition | CREATE\n COLLATION any_name FROM any_name | CREATE COLLATION IF_P NOT\n EXISTS any_name FROM any_name\n definition ::= '(' def_list ')'\n def_list ::= def_elem | def_list ',' def_elem\n def_elem ::= ColLabel '=' def_arg | ColLabel\n def_arg ::= func_type | reserved_keyword | qual_all_Op |\n NumericOnly | Sconst | NONE\n old_aggr_definition ::= '(' old_aggr_list ')'\n old_aggr_list ::= old_aggr_elem | old_aggr_list ','\n old_aggr_elem\n old_aggr_elem ::= IDENT '=' def_arg\n 
opt_enum_val_list ::= enum_val_list |\n enum_val_list ::= Sconst | enum_val_list ',' Sconst\n AlterEnumStmt ::= ALTER TYPE_P any_name ADD_P VALUE_P\n opt_if_not_exists Sconst | ALTER TYPE_P any_name ADD_P VALUE_P\n opt_if_not_exists Sconst BEFORE Sconst | ALTER TYPE_P any_name\n ADD_P VALUE_P opt_if_not_exists Sconst AFTER Sconst | ALTER\n TYPE_P any_name RENAME VALUE_P Sconst TO Sconst\n opt_if_not_exists ::= IF_P NOT EXISTS |\n CreateOpClassStmt ::= CREATE OPERATOR CLASS any_name opt_default\n FOR TYPE_P Typename USING access_method opt_opfamily AS\n opclass_item_list\n opclass_item_list ::= opclass_item | opclass_item_list ','\n opclass_item\n opclass_item ::= OPERATOR Iconst any_operator opclass_purpose\n opt_recheck | OPERATOR Iconst operator_with_argtypes\n opclass_purpose opt_recheck | FUNCTION Iconst\n function_with_argtypes | FUNCTION Iconst '(' type_list ')'\n function_with_argtypes | STORAGE Typename\n opt_default ::= DEFAULT |\n opt_opfamily ::= FAMILY any_name |\n opclass_purpose ::= FOR SEARCH | FOR ORDER BY any_name |\n opt_recheck ::= RECHECK |\n CreateOpFamilyStmt ::= CREATE OPERATOR FAMILY any_name USING\n access_method\n AlterOpFamilyStmt ::= ALTER OPERATOR FAMILY any_name USING\n access_method ADD_P opclass_item_list | ALTER OPERATOR FAMILY\n any_name USING access_method DROP opclass_drop_list\n opclass_drop_list ::= opclass_drop | opclass_drop_list ','\n opclass_drop\n opclass_drop ::= OPERATOR Iconst '(' type_list ')' | FUNCTION\n Iconst '(' type_list ')'\n DropOpClassStmt ::= DROP OPERATOR CLASS any_name USING\n access_method opt_drop_behavior | DROP OPERATOR CLASS IF_P\n EXISTS any_name USING access_method opt_drop_behavior\n DropOpFamilyStmt ::= DROP OPERATOR FAMILY any_name USING\n access_method opt_drop_behavior | DROP OPERATOR FAMILY IF_P\n EXISTS any_name USING access_method opt_drop_behavior\n DropOwnedStmt ::= DROP OWNED BY role_list opt_drop_behavior\n ReassignOwnedStmt ::= REASSIGN OWNED BY role_list TO RoleSpec\n DropStmt ::= DROP 
drop_type_any_name IF_P EXISTS any_name_list\n opt_drop_behavior | DROP drop_type_any_name any_name_list\n opt_drop_behavior | DROP drop_type_name IF_P EXISTS name_list\n opt_drop_behavior | DROP drop_type_name name_list\n opt_drop_behavior | DROP drop_type_name_on_any_name name ON\n any_name opt_drop_behavior | DROP drop_type_name_on_any_name\n IF_P EXISTS name ON any_name opt_drop_behavior | DROP TYPE_P\n type_name_list opt_drop_behavior | DROP TYPE_P IF_P EXISTS\n type_name_list opt_drop_behavior | DROP DOMAIN_P type_name_list\n opt_drop_behavior | DROP DOMAIN_P IF_P EXISTS type_name_list\n opt_drop_behavior | DROP INDEX CONCURRENTLY any_name_list\n opt_drop_behavior | DROP INDEX CONCURRENTLY IF_P EXISTS\n any_name_list opt_drop_behavior\n drop_type_any_name ::= TABLE | SEQUENCE | VIEW | MATERIALIZED\n VIEW | INDEX | FOREIGN TABLE | COLLATION | CONVERSION_P |\n STATISTICS | TEXT_P SEARCH PARSER | TEXT_P SEARCH DICTIONARY |\n TEXT_P SEARCH TEMPLATE | TEXT_P SEARCH CONFIGURATION\n drop_type_name ::= ACCESS METHOD | EVENT TRIGGER | EXTENSION |\n FOREIGN DATA_P WRAPPER | PUBLICATION | SCHEMA | SERVER\n drop_type_name_on_any_name ::= POLICY | RULE | TRIGGER\n any_name_list ::= any_name | any_name_list ',' any_name\n any_name ::= ColId | ColId attrs\n attrs ::= '.' attr_name | attrs '.' 
attr_name\n type_name_list ::= Typename | type_name_list ',' Typename\n TruncateStmt ::= TRUNCATE opt_table relation_expr_list\n opt_restart_seqs opt_drop_behavior\n opt_restart_seqs ::= CONTINUE_P IDENTITY_P | RESTART IDENTITY_P\n |\n CommentStmt ::= COMMENT ON comment_type_any_name any_name IS\n comment_text | COMMENT ON comment_type_name name IS comment_text\n | COMMENT ON TYPE_P Typename IS comment_text | COMMENT ON\n DOMAIN_P Typename IS comment_text | COMMENT ON AGGREGATE\n aggregate_with_argtypes IS comment_text | COMMENT ON FUNCTION\n function_with_argtypes IS comment_text | COMMENT ON OPERATOR\n operator_with_argtypes IS comment_text | COMMENT ON CONSTRAINT\n name ON any_name IS comment_text | COMMENT ON CONSTRAINT name ON\n DOMAIN_P any_name IS comment_text | COMMENT ON POLICY name ON\n any_name IS comment_text | COMMENT ON PROCEDURE\n function_with_argtypes IS comment_text | COMMENT ON ROUTINE\n function_with_argtypes IS comment_text | COMMENT ON RULE name ON\n any_name IS comment_text | COMMENT ON TRANSFORM FOR Typename\n LANGUAGE name IS comment_text | COMMENT ON TRIGGER name ON\n any_name IS comment_text | COMMENT ON OPERATOR CLASS any_name\n USING access_method IS comment_text | COMMENT ON OPERATOR FAMILY\n any_name USING access_method IS comment_text | COMMENT ON\n LARGE_P OBJECT_P NumericOnly IS comment_text | COMMENT ON CAST\n '(' Typename AS Typename ')' IS comment_text\n comment_type_any_name ::= COLUMN | INDEX | SEQUENCE | STATISTICS\n | TABLE | VIEW | MATERIALIZED VIEW | COLLATION | CONVERSION_P |\n FOREIGN TABLE | TEXT_P SEARCH CONFIGURATION | TEXT_P SEARCH\n DICTIONARY | TEXT_P SEARCH PARSER | TEXT_P SEARCH TEMPLATE\n comment_type_name ::= ACCESS METHOD | DATABASE | EVENT TRIGGER |\n EXTENSION | FOREIGN DATA_P WRAPPER | opt_procedural LANGUAGE |\n PUBLICATION | ROLE | SCHEMA | SERVER | SUBSCRIPTION | TABLESPACE\n comment_text ::= Sconst | NULL_P\n SecLabelStmt ::= SECURITY LABEL opt_provider ON\n security_label_type_any_name any_name IS 
security_label |\n SECURITY LABEL opt_provider ON security_label_type_name name IS\n security_label | SECURITY LABEL opt_provider ON TYPE_P Typename\n IS security_label | SECURITY LABEL opt_provider ON DOMAIN_P\n Typename IS security_label | SECURITY LABEL opt_provider ON\n AGGREGATE aggregate_with_argtypes IS security_label | SECURITY\n LABEL opt_provider ON FUNCTION function_with_argtypes IS\n security_label | SECURITY LABEL opt_provider ON LARGE_P OBJECT_P\n NumericOnly IS security_label | SECURITY LABEL opt_provider ON\n PROCEDURE function_with_argtypes IS security_label | SECURITY\n LABEL opt_provider ON ROUTINE function_with_argtypes IS\n security_label\n opt_provider ::= FOR NonReservedWord_or_Sconst |\n security_label_type_any_name ::= COLUMN | FOREIGN TABLE |\n SEQUENCE | TABLE | VIEW | MATERIALIZED VIEW\n security_label_type_name ::= DATABASE | EVENT TRIGGER |\n opt_procedural LANGUAGE | PUBLICATION | ROLE | SCHEMA |\n SUBSCRIPTION | TABLESPACE\n security_label ::= Sconst | NULL_P\n FetchStmt ::= FETCH fetch_args | MOVE fetch_args\n fetch_args ::= cursor_name | from_in cursor_name | NEXT\n opt_from_in cursor_name | PRIOR opt_from_in cursor_name |\n FIRST_P opt_from_in cursor_name | LAST_P opt_from_in cursor_name\n | ABSOLUTE_P SignedIconst opt_from_in cursor_name | RELATIVE_P\n SignedIconst opt_from_in cursor_name | SignedIconst opt_from_in\n cursor_name | ALL opt_from_in cursor_name | FORWARD opt_from_in\n cursor_name | FORWARD SignedIconst opt_from_in cursor_name |\n FORWARD ALL opt_from_in cursor_name | BACKWARD opt_from_in\n cursor_name | BACKWARD SignedIconst opt_from_in cursor_name |\n BACKWARD ALL opt_from_in cursor_name\n from_in ::= FROM | IN_P\n opt_from_in ::= from_in |\n GrantStmt ::= GRANT privileges ON privilege_target TO\n grantee_list opt_grant_grant_option\n RevokeStmt ::= REVOKE privileges ON privilege_target FROM\n grantee_list opt_drop_behavior | REVOKE GRANT OPTION FOR\n privileges ON privilege_target FROM grantee_list\n 
opt_drop_behavior\n privileges ::= privilege_list | ALL | ALL PRIVILEGES | ALL '('\n columnList ')' | ALL PRIVILEGES '(' columnList ')'\n privilege_list ::= privilege | privilege_list ',' privilege\n privilege ::= SELECT opt_column_list | REFERENCES\n opt_column_list | CREATE opt_column_list | ColId opt_column_list\n privilege_target ::= qualified_name_list | TABLE\n qualified_name_list | SEQUENCE qualified_name_list | FOREIGN\n DATA_P WRAPPER name_list | FOREIGN SERVER name_list | FUNCTION\n function_with_argtypes_list | PROCEDURE\n function_with_argtypes_list | ROUTINE\n function_with_argtypes_list | DATABASE name_list | DOMAIN_P\n any_name_list | LANGUAGE name_list | LARGE_P OBJECT_P\n NumericOnly_list | SCHEMA name_list | TABLESPACE name_list |\n TYPE_P any_name_list | ALL TABLES IN_P SCHEMA name_list | ALL\n SEQUENCES IN_P SCHEMA name_list | ALL FUNCTIONS IN_P SCHEMA\n name_list | ALL PROCEDURES IN_P SCHEMA name_list | ALL ROUTINES\n IN_P SCHEMA name_list\n grantee_list ::= grantee | grantee_list ',' grantee\n grantee ::= RoleSpec | GROUP_P RoleSpec\n opt_grant_grant_option ::= WITH GRANT OPTION |\n GrantRoleStmt ::= GRANT privilege_list TO role_list\n opt_grant_admin_option opt_granted_by\n RevokeRoleStmt ::= REVOKE privilege_list FROM role_list\n opt_granted_by opt_drop_behavior | REVOKE ADMIN OPTION FOR\n privilege_list FROM role_list opt_granted_by opt_drop_behavior\n opt_grant_admin_option ::= WITH ADMIN OPTION |\n opt_granted_by ::= GRANTED BY RoleSpec |\n AlterDefaultPrivilegesStmt ::= ALTER DEFAULT PRIVILEGES\n DefACLOptionList DefACLAction\n DefACLOptionList ::= DefACLOptionList DefACLOption |\n DefACLOption ::= IN_P SCHEMA name_list | FOR ROLE role_list |\n FOR USER role_list\n DefACLAction ::= GRANT privileges ON defacl_privilege_target TO\n grantee_list opt_grant_grant_option | REVOKE privileges ON\n defacl_privilege_target FROM grantee_list opt_drop_behavior |\n REVOKE GRANT OPTION FOR privileges ON defacl_privilege_target\n FROM grantee_list 
opt_drop_behavior\n defacl_privilege_target ::= TABLES | FUNCTIONS | ROUTINES |\n SEQUENCES | TYPES_P | SCHEMAS\n IndexStmt ::= CREATE opt_unique INDEX opt_concurrently\n opt_index_name ON relation_expr access_method_clause '('\n index_params ')' opt_include opt_reloptions OptTableSpace\n where_clause | CREATE opt_unique INDEX opt_concurrently IF_P NOT\n EXISTS index_name ON relation_expr access_method_clause '('\n index_params ')' opt_include opt_reloptions OptTableSpace\n where_clause\n opt_unique ::= UNIQUE |\n opt_concurrently ::= CONCURRENTLY |\n opt_index_name ::= index_name |\n access_method_clause ::= USING access_method |\n index_params ::= index_elem | index_params ',' index_elem\n index_elem_options ::= opt_collate opt_class opt_asc_desc\n opt_nulls_order | opt_collate any_name reloptions opt_asc_desc\n opt_nulls_order\n index_elem ::= ColId index_elem_options | func_expr_windowless\n index_elem_options | '(' a_expr ')' index_elem_options\n opt_include ::= INCLUDE '(' index_including_params ')' |\n index_including_params ::= index_elem | index_including_params\n ',' index_elem\n opt_collate ::= COLLATE any_name |\n opt_class ::= any_name |\n opt_asc_desc ::= ASC | DESC |\n opt_nulls_order ::= NULLS_LA FIRST_P | NULLS_LA LAST_P |\n CreateFunctionStmt ::= CREATE opt_or_replace FUNCTION func_name\n func_args_with_defaults RETURNS func_return createfunc_opt_list\n | CREATE opt_or_replace FUNCTION func_name\n func_args_with_defaults RETURNS TABLE '(' table_func_column_list\n ')' createfunc_opt_list | CREATE opt_or_replace FUNCTION\n func_name func_args_with_defaults createfunc_opt_list | CREATE\n opt_or_replace PROCEDURE func_name func_args_with_defaults\n createfunc_opt_list\n opt_or_replace ::= OR REPLACE |\n func_args ::= '(' func_args_list ')' | '(' ')'\n func_args_list ::= func_arg | func_args_list ',' func_arg\n function_with_argtypes_list ::= function_with_argtypes |\n function_with_argtypes_list ',' function_with_argtypes\n function_with_argtypes ::= 
func_name func_args |\n type_func_name_keyword | ColId | ColId indirection\n func_args_with_defaults ::= '(' func_args_with_defaults_list ')'\n | '(' ')'\n func_args_with_defaults_list ::= func_arg_with_default |\n func_args_with_defaults_list ',' func_arg_with_default\n func_arg ::= arg_class param_name func_type | param_name\n arg_class func_type | param_name func_type | arg_class func_type\n | func_type\n arg_class ::= IN_P | OUT_P | INOUT | IN_P OUT_P | VARIADIC\n param_name ::= type_function_name\n func_return ::= func_type\n func_type ::= Typename | type_function_name attrs '%' TYPE_P |\n SETOF type_function_name attrs '%' TYPE_P\n func_arg_with_default ::= func_arg | func_arg DEFAULT a_expr |\n func_arg '=' a_expr\n aggr_arg ::= func_arg\n aggr_args ::= '(' '*' ')' | '(' aggr_args_list ')' | '(' ORDER\n BY aggr_args_list ')' | '(' aggr_args_list ORDER BY\n aggr_args_list ')'\n aggr_args_list ::= aggr_arg | aggr_args_list ',' aggr_arg\n aggregate_with_argtypes ::= func_name aggr_args\n aggregate_with_argtypes_list ::= aggregate_with_argtypes |\n aggregate_with_argtypes_list ',' aggregate_with_argtypes\n createfunc_opt_list ::= createfunc_opt_item |\n createfunc_opt_list createfunc_opt_item\n common_func_opt_item ::= CALLED ON NULL_P INPUT_P | RETURNS\n NULL_P ON NULL_P INPUT_P | STRICT_P | IMMUTABLE | STABLE |\n VOLATILE | EXTERNAL SECURITY DEFINER | EXTERNAL SECURITY INVOKER\n | SECURITY DEFINER | SECURITY INVOKER | LEAKPROOF | NOT\n LEAKPROOF | COST NumericOnly | ROWS NumericOnly | SUPPORT\n any_name | FunctionSetResetClause | PARALLEL ColId\n createfunc_opt_item ::= AS func_as | LANGUAGE\n NonReservedWord_or_Sconst | TRANSFORM transform_type_list |\n WINDOW | common_func_opt_item\n func_as ::= Sconst | Sconst ',' Sconst\n transform_type_list ::= FOR TYPE_P Typename |\n transform_type_list ',' FOR TYPE_P Typename\n opt_definition ::= WITH definition |\n table_func_column ::= param_name func_type\n table_func_column_list ::= table_func_column |\n 
 table_func_column_list ',' table_func_column\n AlterFunctionStmt ::= ALTER FUNCTION function_with_argtypes\n alterfunc_opt_list opt_restrict | ALTER PROCEDURE\n function_with_argtypes alterfunc_opt_list opt_restrict | ALTER\n ROUTINE function_with_argtypes alterfunc_opt_list opt_restrict\n alterfunc_opt_list ::= common_func_opt_item | alterfunc_opt_list\n common_func_opt_item\n opt_restrict ::= RESTRICT |\n RemoveFuncStmt ::= DROP FUNCTION function_with_argtypes_list\n opt_drop_behavior | DROP FUNCTION IF_P EXISTS\n function_with_argtypes_list opt_drop_behavior | DROP PROCEDURE\n function_with_argtypes_list opt_drop_behavior | DROP PROCEDURE\n IF_P EXISTS function_with_argtypes_list opt_drop_behavior | DROP\n ROUTINE function_with_argtypes_list opt_drop_behavior | DROP\n ROUTINE IF_P EXISTS function_with_argtypes_list\n opt_drop_behavior\n RemoveAggrStmt ::= DROP AGGREGATE aggregate_with_argtypes_list\n opt_drop_behavior | DROP AGGREGATE IF_P EXISTS\n aggregate_with_argtypes_list opt_drop_behavior\n RemoveOperStmt ::= DROP OPERATOR operator_with_argtypes_list\n opt_drop_behavior | DROP OPERATOR IF_P EXISTS\n operator_with_argtypes_list opt_drop_behavior\n oper_argtypes ::= '(' Typename ')' | '(' Typename ',' Typename\n ')' | '(' NONE ',' Typename ')' | '(' Typename ',' NONE ')'\n any_operator ::= all_Op | ColId '.' 
any_operator\n operator_with_argtypes_list ::= operator_with_argtypes |\n operator_with_argtypes_list ',' operator_with_argtypes\n operator_with_argtypes ::= any_operator oper_argtypes\n DoStmt ::= DO dostmt_opt_list\n dostmt_opt_list ::= dostmt_opt_item | dostmt_opt_list\n dostmt_opt_item\n dostmt_opt_item ::= Sconst | LANGUAGE NonReservedWord_or_Sconst\n CreateCastStmt ::= CREATE CAST '(' Typename AS Typename ')' WITH\n FUNCTION function_with_argtypes cast_context | CREATE CAST '('\n Typename AS Typename ')' WITHOUT FUNCTION cast_context | CREATE\n CAST '(' Typename AS Typename ')' WITH INOUT cast_context\n cast_context ::= AS IMPLICIT_P | AS ASSIGNMENT |\n DropCastStmt ::= DROP CAST opt_if_exists '(' Typename AS\n Typename ')' opt_drop_behavior\n opt_if_exists ::= IF_P EXISTS |\n CreateTransformStmt ::= CREATE opt_or_replace TRANSFORM FOR\n Typename LANGUAGE name '(' transform_element_list ')'\n transform_element_list ::= FROM SQL_P WITH FUNCTION\n function_with_argtypes ',' TO SQL_P WITH FUNCTION\n function_with_argtypes | TO SQL_P WITH FUNCTION\n function_with_argtypes ',' FROM SQL_P WITH FUNCTION\n function_with_argtypes | FROM SQL_P WITH FUNCTION\n function_with_argtypes | TO SQL_P WITH FUNCTION\n function_with_argtypes\n DropTransformStmt ::= DROP TRANSFORM opt_if_exists FOR Typename\n LANGUAGE name opt_drop_behavior\n ReindexStmt ::= REINDEX reindex_target_type opt_concurrently\n qualified_name | REINDEX reindex_target_multitable\n opt_concurrently name | REINDEX '(' reindex_option_list ')'\n reindex_target_type opt_concurrently qualified_name | REINDEX\n '(' reindex_option_list ')' reindex_target_multitable\n opt_concurrently name\n reindex_target_type ::= INDEX | TABLE\n reindex_target_multitable ::= SCHEMA | SYSTEM_P | DATABASE\n reindex_option_list ::= reindex_option_elem |\n reindex_option_list ',' reindex_option_elem\n reindex_option_elem ::= VERBOSE\n AlterTblSpcStmt ::= ALTER TABLESPACE name SET reloptions | ALTER\n TABLESPACE name RESET 
reloptions\n RenameStmt ::= ALTER AGGREGATE aggregate_with_argtypes RENAME TO\n name | ALTER COLLATION any_name RENAME TO name | ALTER\n CONVERSION_P any_name RENAME TO name | ALTER DATABASE\n database_name RENAME TO database_name | ALTER DOMAIN_P any_name\n RENAME TO name | ALTER DOMAIN_P any_name RENAME CONSTRAINT name\n TO name | ALTER FOREIGN DATA_P WRAPPER name RENAME TO name |\n ALTER FUNCTION function_with_argtypes RENAME TO name | ALTER\n GROUP_P RoleId RENAME TO RoleId | ALTER opt_procedural LANGUAGE\n name RENAME TO name | ALTER OPERATOR CLASS any_name USING\n access_method RENAME TO name | ALTER OPERATOR FAMILY any_name\n USING access_method RENAME TO name | ALTER POLICY name ON\n qualified_name RENAME TO name | ALTER POLICY IF_P EXISTS name ON\n qualified_name RENAME TO name | ALTER PROCEDURE\n function_with_argtypes RENAME TO name | ALTER PUBLICATION name\n RENAME TO name | ALTER ROUTINE function_with_argtypes RENAME TO\n name | ALTER SCHEMA name RENAME TO name | ALTER SERVER name\n RENAME TO name | ALTER SUBSCRIPTION name RENAME TO name | ALTER\n TABLE relation_expr RENAME TO name | ALTER TABLE IF_P EXISTS\n relation_expr RENAME TO name | ALTER SEQUENCE qualified_name\n RENAME TO name | ALTER SEQUENCE IF_P EXISTS qualified_name\n RENAME TO name | ALTER VIEW qualified_name RENAME TO name |\n ALTER VIEW IF_P EXISTS qualified_name RENAME TO name | ALTER\n MATERIALIZED VIEW qualified_name RENAME TO name | ALTER\n MATERIALIZED VIEW IF_P EXISTS qualified_name RENAME TO name |\n ALTER INDEX qualified_name RENAME TO name | ALTER INDEX IF_P\n EXISTS qualified_name RENAME TO name | ALTER FOREIGN TABLE\n relation_expr RENAME TO name | ALTER FOREIGN TABLE IF_P EXISTS\n relation_expr RENAME TO name | ALTER TABLE relation_expr RENAME\n opt_column name TO name | ALTER TABLE IF_P EXISTS relation_expr\n RENAME opt_column name TO name | ALTER VIEW qualified_name\n RENAME opt_column name TO name | ALTER VIEW IF_P EXISTS\n qualified_name RENAME opt_column name TO name | 
ALTER\n MATERIALIZED VIEW qualified_name RENAME opt_column name TO name\n | ALTER MATERIALIZED VIEW IF_P EXISTS qualified_name RENAME\n opt_column name TO name | ALTER TABLE relation_expr RENAME\n CONSTRAINT name TO name | ALTER TABLE IF_P EXISTS relation_expr\n RENAME CONSTRAINT name TO name | ALTER FOREIGN TABLE\n relation_expr RENAME opt_column name TO name | ALTER FOREIGN\n TABLE IF_P EXISTS relation_expr RENAME opt_column name TO name |\n ALTER RULE name ON qualified_name RENAME TO name | ALTER TRIGGER\n name ON qualified_name RENAME TO name | ALTER EVENT TRIGGER name\n RENAME TO name | ALTER ROLE RoleId RENAME TO RoleId | ALTER USER\n RoleId RENAME TO RoleId | ALTER TABLESPACE name RENAME TO name |\n ALTER STATISTICS any_name RENAME TO name | ALTER TEXT_P SEARCH\n PARSER any_name RENAME TO name | ALTER TEXT_P SEARCH DICTIONARY\n any_name RENAME TO name | ALTER TEXT_P SEARCH TEMPLATE any_name\n RENAME TO name | ALTER TEXT_P SEARCH CONFIGURATION any_name\n RENAME TO name | ALTER TYPE_P any_name RENAME TO name | ALTER\n TYPE_P any_name RENAME ATTRIBUTE name TO name opt_drop_behavior\n opt_column ::= COLUMN |\n opt_set_data ::= SET DATA_P |\n AlterObjectDependsStmt ::= ALTER FUNCTION function_with_argtypes\n opt_no DEPENDS ON EXTENSION name | ALTER PROCEDURE\n function_with_argtypes opt_no DEPENDS ON EXTENSION name | ALTER\n ROUTINE function_with_argtypes opt_no DEPENDS ON EXTENSION name\n | ALTER TRIGGER name ON qualified_name opt_no DEPENDS ON\n EXTENSION name | ALTER MATERIALIZED VIEW qualified_name opt_no\n DEPENDS ON EXTENSION name | ALTER INDEX qualified_name opt_no\n DEPENDS ON EXTENSION name\n opt_no ::= NO |\n AlterObjectSchemaStmt ::= ALTER AGGREGATE\n aggregate_with_argtypes SET SCHEMA name | ALTER COLLATION\n any_name SET SCHEMA name | ALTER CONVERSION_P any_name SET\n SCHEMA name | ALTER DOMAIN_P any_name SET SCHEMA name | ALTER\n EXTENSION name SET SCHEMA name | ALTER FUNCTION\n function_with_argtypes SET SCHEMA name | ALTER OPERATOR\n 
operator_with_argtypes SET SCHEMA name | ALTER OPERATOR CLASS\n any_name USING access_method SET SCHEMA name | ALTER OPERATOR\n FAMILY any_name USING access_method SET SCHEMA name | ALTER\n PROCEDURE function_with_argtypes SET SCHEMA name | ALTER ROUTINE\n function_with_argtypes SET SCHEMA name | ALTER TABLE\n relation_expr SET SCHEMA name | ALTER TABLE IF_P EXISTS\n relation_expr SET SCHEMA name | ALTER STATISTICS any_name SET\n SCHEMA name | ALTER TEXT_P SEARCH PARSER any_name SET SCHEMA\n name | ALTER TEXT_P SEARCH DICTIONARY any_name SET SCHEMA name |\n ALTER TEXT_P SEARCH TEMPLATE any_name SET SCHEMA name | ALTER\n TEXT_P SEARCH CONFIGURATION any_name SET SCHEMA name | ALTER\n SEQUENCE qualified_name SET SCHEMA name | ALTER SEQUENCE IF_P\n EXISTS qualified_name SET SCHEMA name | ALTER VIEW\n qualified_name SET SCHEMA name | ALTER VIEW IF_P EXISTS\n qualified_name SET SCHEMA name | ALTER MATERIALIZED VIEW\n qualified_name SET SCHEMA name | ALTER MATERIALIZED VIEW IF_P\n EXISTS qualified_name SET SCHEMA name | ALTER FOREIGN TABLE\n relation_expr SET SCHEMA name | ALTER FOREIGN TABLE IF_P EXISTS\n relation_expr SET SCHEMA name | ALTER TYPE_P any_name SET SCHEMA\n name\n AlterOperatorStmt ::= ALTER OPERATOR operator_with_argtypes SET\n '(' operator_def_list ')'\n operator_def_list ::= operator_def_elem | operator_def_list ','\n operator_def_elem\n operator_def_elem ::= ColLabel '=' NONE | ColLabel '='\n operator_def_arg\n operator_def_arg ::= func_type | reserved_keyword | qual_all_Op\n | NumericOnly | Sconst\n AlterTypeStmt ::= ALTER TYPE_P any_name SET '('\n operator_def_list ')'\n AlterOwnerStmt ::= ALTER AGGREGATE aggregate_with_argtypes OWNER\n TO RoleSpec | ALTER COLLATION any_name OWNER TO RoleSpec | ALTER\n CONVERSION_P any_name OWNER TO RoleSpec | ALTER DATABASE\n database_name OWNER TO RoleSpec | ALTER DOMAIN_P any_name OWNER\n TO RoleSpec | ALTER FUNCTION function_with_argtypes OWNER TO\n RoleSpec | ALTER opt_procedural LANGUAGE name OWNER TO RoleSpec\n 
| ALTER LARGE_P OBJECT_P NumericOnly OWNER TO RoleSpec | ALTER\n OPERATOR operator_with_argtypes OWNER TO RoleSpec | ALTER\n OPERATOR CLASS any_name USING access_method OWNER TO RoleSpec |\n ALTER OPERATOR FAMILY any_name USING access_method OWNER TO\n RoleSpec | ALTER PROCEDURE function_with_argtypes OWNER TO\n RoleSpec | ALTER ROUTINE function_with_argtypes OWNER TO\n RoleSpec | ALTER SCHEMA name OWNER TO RoleSpec | ALTER TYPE_P\n any_name OWNER TO RoleSpec | ALTER TABLESPACE name OWNER TO\n RoleSpec | ALTER STATISTICS any_name OWNER TO RoleSpec | ALTER\n TEXT_P SEARCH DICTIONARY any_name OWNER TO RoleSpec | ALTER\n TEXT_P SEARCH CONFIGURATION any_name OWNER TO RoleSpec | ALTER\n FOREIGN DATA_P WRAPPER name OWNER TO RoleSpec | ALTER SERVER\n name OWNER TO RoleSpec | ALTER EVENT TRIGGER name OWNER TO\n RoleSpec | ALTER PUBLICATION name OWNER TO RoleSpec | ALTER\n SUBSCRIPTION name OWNER TO RoleSpec\n CreatePublicationStmt ::= CREATE PUBLICATION name\n opt_publication_for_tables opt_definition\n opt_publication_for_tables ::= publication_for_tables |\n publication_for_tables ::= FOR TABLE relation_expr_list | FOR\n ALL TABLES\n AlterPublicationStmt ::= ALTER PUBLICATION name SET definition |\n ALTER PUBLICATION name ADD_P TABLE relation_expr_list | ALTER\n PUBLICATION name SET TABLE relation_expr_list | ALTER\n PUBLICATION name DROP TABLE relation_expr_list\n CreateSubscriptionStmt ::= CREATE SUBSCRIPTION name CONNECTION\n Sconst PUBLICATION publication_name_list opt_definition\n publication_name_list ::= publication_name_item |\n publication_name_list ',' publication_name_item\n publication_name_item ::= ColLabel\n AlterSubscriptionStmt ::= ALTER SUBSCRIPTION name SET definition\n | ALTER SUBSCRIPTION name CONNECTION Sconst | ALTER SUBSCRIPTION\n name REFRESH PUBLICATION opt_definition | ALTER SUBSCRIPTION\n name SET PUBLICATION publication_name_list opt_definition |\n ALTER SUBSCRIPTION name ENABLE_P | ALTER SUBSCRIPTION name\n DISABLE_P\n DropSubscriptionStmt 
::= DROP SUBSCRIPTION name\n opt_drop_behavior | DROP SUBSCRIPTION IF_P EXISTS name\n opt_drop_behavior\n RuleStmt ::= CREATE opt_or_replace RULE name AS ON event TO\n qualified_name where_clause DO opt_instead RuleActionList\n RuleActionList ::= NOTHING | RuleActionStmt | '('\n RuleActionMulti ')'\n RuleActionMulti ::= RuleActionMulti ';' RuleActionStmtOrEmpty |\n RuleActionStmtOrEmpty\n RuleActionStmt ::= SelectStmt | InsertStmt | UpdateStmt |\n DeleteStmt | NotifyStmt\n RuleActionStmtOrEmpty ::= RuleActionStmt |\n event ::= SELECT | UPDATE | DELETE_P | INSERT\n opt_instead ::= INSTEAD | ALSO |\n NotifyStmt ::= NOTIFY ColId notify_payload\n notify_payload ::= ',' Sconst |\n ListenStmt ::= LISTEN ColId\n UnlistenStmt ::= UNLISTEN ColId | UNLISTEN '*'\n TransactionStmt ::= ABORT_P opt_transaction\n opt_transaction_chain | BEGIN_P opt_transaction\n transaction_mode_list_or_empty | START TRANSACTION\n transaction_mode_list_or_empty | COMMIT opt_transaction\n opt_transaction_chain | END_P opt_transaction\n opt_transaction_chain | ROLLBACK opt_transaction\n opt_transaction_chain | SAVEPOINT ColId | RELEASE SAVEPOINT\n ColId | RELEASE ColId | ROLLBACK opt_transaction TO SAVEPOINT\n ColId | ROLLBACK opt_transaction TO ColId | PREPARE TRANSACTION\n Sconst | COMMIT PREPARED Sconst | ROLLBACK PREPARED Sconst\n opt_transaction ::= WORK | TRANSACTION |\n transaction_mode_item ::= ISOLATION LEVEL iso_level | READ ONLY\n | READ WRITE | DEFERRABLE | NOT DEFERRABLE\n transaction_mode_list ::= transaction_mode_item |\n transaction_mode_list ',' transaction_mode_item |\n transaction_mode_list transaction_mode_item\n transaction_mode_list_or_empty ::= transaction_mode_list |\n opt_transaction_chain ::= AND CHAIN | AND NO CHAIN |\n ViewStmt ::= CREATE OptTemp VIEW qualified_name opt_column_list\n opt_reloptions AS SelectStmt opt_check_option | CREATE OR\n REPLACE OptTemp VIEW qualified_name opt_column_list\n opt_reloptions AS SelectStmt opt_check_option | CREATE OptTemp\n RECURSIVE 
VIEW qualified_name '(' columnList ')' opt_reloptions\n AS SelectStmt opt_check_option | CREATE OR REPLACE OptTemp\n RECURSIVE VIEW qualified_name '(' columnList ')' opt_reloptions\n AS SelectStmt opt_check_option\n opt_check_option ::= WITH CHECK OPTION | WITH CASCADED CHECK\n OPTION | WITH LOCAL CHECK OPTION |\n LoadStmt ::= LOAD file_name\n CreatedbStmt ::= CREATE DATABASE database_name opt_with\n createdb_opt_list\n createdb_opt_list ::= createdb_opt_items |\n createdb_opt_items ::= createdb_opt_item | createdb_opt_items\n createdb_opt_item\n createdb_opt_item ::= createdb_opt_name opt_equal SignedIconst |\n createdb_opt_name opt_equal opt_boolean_or_string |\n createdb_opt_name opt_equal DEFAULT\n createdb_opt_name ::= IDENT | CONNECTION LIMIT | ENCODING |\n LOCATION | OWNER | TABLESPACE | TEMPLATE\n opt_equal ::= '=' |\n AlterDatabaseStmt ::= ALTER DATABASE database_name WITH\n createdb_opt_list | ALTER DATABASE database_name\n createdb_opt_list | ALTER DATABASE database_name SET TABLESPACE\n name\n AlterDatabaseSetStmt ::= ALTER DATABASE database_name\n SetResetClause\n DropdbStmt ::= DROP DATABASE database_name | DROP DATABASE IF_P\n EXISTS database_name | DROP DATABASE database_name opt_with '('\n drop_option_list ')' | DROP DATABASE IF_P EXISTS database_name\n opt_with '(' drop_option_list ')'\n drop_option_list ::= drop_option | drop_option_list ','\n drop_option\n drop_option ::= FORCE\n AlterCollationStmt ::= ALTER COLLATION any_name REFRESH\n VERSION_P\n AlterSystemStmt ::= ALTER SYSTEM_P SET generic_set | ALTER\n SYSTEM_P RESET generic_reset\n CreateDomainStmt ::= CREATE DOMAIN_P any_name opt_as Typename\n ColQualList\n AlterDomainStmt ::= ALTER DOMAIN_P any_name alter_column_default\n | ALTER DOMAIN_P any_name DROP NOT NULL_P | ALTER DOMAIN_P\n any_name SET NOT NULL_P | ALTER DOMAIN_P any_name ADD_P\n TableConstraint | ALTER DOMAIN_P any_name DROP CONSTRAINT name\n opt_drop_behavior | ALTER DOMAIN_P any_name DROP CONSTRAINT IF_P\n EXISTS name 
opt_drop_behavior | ALTER DOMAIN_P any_name VALIDATE\n CONSTRAINT name\n opt_as ::= AS |\n AlterTSDictionaryStmt ::= ALTER TEXT_P SEARCH DICTIONARY\n any_name definition\n AlterTSConfigurationStmt ::= ALTER TEXT_P SEARCH CONFIGURATION\n any_name ADD_P MAPPING FOR name_list any_with any_name_list |\n ALTER TEXT_P SEARCH CONFIGURATION any_name ALTER MAPPING FOR\n name_list any_with any_name_list | ALTER TEXT_P SEARCH\n CONFIGURATION any_name ALTER MAPPING REPLACE any_name any_with\n any_name | ALTER TEXT_P SEARCH CONFIGURATION any_name ALTER\n MAPPING FOR name_list REPLACE any_name any_with any_name | ALTER\n TEXT_P SEARCH CONFIGURATION any_name DROP MAPPING FOR name_list\n | ALTER TEXT_P SEARCH CONFIGURATION any_name DROP MAPPING IF_P\n EXISTS FOR name_list\n any_with ::= WITH | WITH_LA\n CreateConversionStmt ::= CREATE opt_default CONVERSION_P\n any_name FOR Sconst TO Sconst FROM any_name\n ClusterStmt ::= CLUSTER opt_verbose qualified_name\n cluster_index_specification | CLUSTER opt_verbose | CLUSTER\n opt_verbose index_name ON qualified_name\n cluster_index_specification ::= USING index_name |\n VacuumStmt ::= VACUUM opt_full opt_freeze opt_verbose\n opt_analyze opt_vacuum_relation_list | VACUUM '('\n vac_analyze_option_list ')' opt_vacuum_relation_list\n AnalyzeStmt ::= analyze_keyword opt_verbose\n opt_vacuum_relation_list | analyze_keyword '('\n vac_analyze_option_list ')' opt_vacuum_relation_list\n vac_analyze_option_list ::= vac_analyze_option_elem |\n vac_analyze_option_list ',' vac_analyze_option_elem\n analyze_keyword ::= ANALYZE | ANALYSE\n vac_analyze_option_elem ::= vac_analyze_option_name\n vac_analyze_option_arg\n vac_analyze_option_name ::= NonReservedWord | analyze_keyword\n vac_analyze_option_arg ::= opt_boolean_or_string | NumericOnly |\n opt_analyze ::= analyze_keyword |\n opt_verbose ::= VERBOSE |\n opt_full ::= FULL |\n opt_freeze ::= FREEZE |\n opt_name_list ::= '(' name_list ')' |\n vacuum_relation ::= qualified_name opt_name_list\n 
vacuum_relation_list ::= vacuum_relation | vacuum_relation_list\n ',' vacuum_relation\n opt_vacuum_relation_list ::= vacuum_relation_list |\n ExplainStmt ::= EXPLAIN ExplainableStmt | EXPLAIN\n analyze_keyword opt_verbose ExplainableStmt | EXPLAIN VERBOSE\n ExplainableStmt | EXPLAIN '(' explain_option_list ')'\n ExplainableStmt\n ExplainableStmt ::= SelectStmt | InsertStmt | UpdateStmt |\n DeleteStmt | DeclareCursorStmt | CreateAsStmt |\n CreateMatViewStmt | RefreshMatViewStmt | ExecuteStmt\n explain_option_list ::= explain_option_elem |\n explain_option_list ',' explain_option_elem\n explain_option_elem ::= explain_option_name explain_option_arg\n explain_option_name ::= NonReservedWord | analyze_keyword\n explain_option_arg ::= opt_boolean_or_string | NumericOnly |\n PrepareStmt ::= PREPARE name prep_type_clause AS PreparableStmt\n prep_type_clause ::= '(' type_list ')' |\n PreparableStmt ::= SelectStmt | InsertStmt | UpdateStmt |\n DeleteStmt\n ExecuteStmt ::= EXECUTE name execute_param_clause | CREATE\n OptTemp TABLE create_as_target AS EXECUTE name\n execute_param_clause opt_with_data | CREATE OptTemp TABLE IF_P\n NOT EXISTS create_as_target AS EXECUTE name execute_param_clause\n opt_with_data\n execute_param_clause ::= '(' expr_list ')' |\n DeallocateStmt ::= DEALLOCATE name | DEALLOCATE PREPARE name |\n DEALLOCATE ALL | DEALLOCATE PREPARE ALL\n InsertStmt ::= opt_with_clause INSERT INTO insert_target\n insert_rest opt_on_conflict returning_clause\n insert_target ::= qualified_name | qualified_name AS ColId\n insert_rest ::= SelectStmt | OVERRIDING override_kind VALUE_P\n SelectStmt | '(' insert_column_list ')' SelectStmt | '('\n insert_column_list ')' OVERRIDING override_kind VALUE_P\n SelectStmt | DEFAULT VALUES\n override_kind ::= USER | SYSTEM_P\n insert_column_list ::= insert_column_item | insert_column_list\n ',' insert_column_item\n insert_column_item ::= ColId opt_indirection\n opt_on_conflict ::= ON CONFLICT opt_conf_expr DO UPDATE SET\n 
set_clause_list where_clause | ON CONFLICT opt_conf_expr DO\n NOTHING |\n opt_conf_expr ::= '(' index_params ')' where_clause | ON\n CONSTRAINT name |\n returning_clause ::= RETURNING target_list |\n DeleteStmt ::= opt_with_clause DELETE_P FROM\n relation_expr_opt_alias using_clause where_or_current_clause\n returning_clause\n using_clause ::= USING from_list |\n LockStmt ::= LOCK_P opt_table relation_expr_list opt_lock\n opt_nowait\n opt_lock ::= IN_P lock_type MODE |\n lock_type ::= ACCESS SHARE | ROW SHARE | ROW EXCLUSIVE | SHARE\n UPDATE EXCLUSIVE | SHARE | SHARE ROW EXCLUSIVE | EXCLUSIVE |\n ACCESS EXCLUSIVE\n opt_nowait ::= NOWAIT |\n opt_nowait_or_skip ::= NOWAIT | SKIP LOCKED |\n UpdateStmt ::= opt_with_clause UPDATE relation_expr_opt_alias\n SET set_clause_list from_clause where_or_current_clause\n returning_clause\n set_clause_list ::= set_clause | set_clause_list ',' set_clause\n set_clause ::= set_target '=' a_expr | '(' set_target_list ')'\n '=' a_expr\n set_target ::= ColId opt_indirection\n set_target_list ::= set_target | set_target_list ',' set_target\n DeclareCursorStmt ::= DECLARE cursor_name cursor_options CURSOR\n opt_hold FOR SelectStmt\n cursor_name ::= name\n cursor_options ::= | cursor_options NO SCROLL | cursor_options\n SCROLL | cursor_options BINARY | cursor_options INSENSITIVE\n opt_hold ::= | WITH HOLD | WITHOUT HOLD\n SelectStmt ::= select_no_parens | select_with_parens\n select_with_parens ::= '(' select_no_parens ')' | '('\n select_with_parens ')'\n select_no_parens ::= simple_select | select_clause sort_clause |\n select_clause opt_sort_clause for_locking_clause\n opt_select_limit | select_clause opt_sort_clause select_limit\n opt_for_locking_clause | with_clause select_clause | with_clause\n select_clause sort_clause | with_clause select_clause\n opt_sort_clause for_locking_clause opt_select_limit |\n with_clause select_clause opt_sort_clause select_limit\n opt_for_locking_clause\n select_clause ::= simple_select | 
select_with_parens\n simple_select ::= SELECT opt_all_clause opt_target_list\n into_clause from_clause where_clause group_clause having_clause\n window_clause | SELECT distinct_clause target_list into_clause\n from_clause where_clause group_clause having_clause\n window_clause | values_clause | TABLE relation_expr |\n select_clause UNION all_or_distinct select_clause |\n select_clause INTERSECT all_or_distinct select_clause |\n select_clause EXCEPT all_or_distinct select_clause\n with_clause ::= WITH cte_list | WITH_LA cte_list | WITH\n RECURSIVE cte_list\n cte_list ::= common_table_expr | cte_list ',' common_table_expr\n common_table_expr ::= name opt_name_list AS opt_materialized '('\n PreparableStmt ')'\n opt_materialized ::= MATERIALIZED | NOT MATERIALIZED |\n opt_with_clause ::= with_clause |\n into_clause ::= INTO OptTempTableName |\n OptTempTableName ::= TEMPORARY opt_table qualified_name | TEMP\n opt_table qualified_name | LOCAL TEMPORARY opt_table\n qualified_name | LOCAL TEMP opt_table qualified_name | GLOBAL\n TEMPORARY opt_table qualified_name | GLOBAL TEMP opt_table\n qualified_name | UNLOGGED opt_table qualified_name | TABLE\n qualified_name | qualified_name\n opt_table ::= TABLE |\n all_or_distinct ::= ALL | DISTINCT |\n distinct_clause ::= DISTINCT | DISTINCT ON '(' expr_list ')'\n opt_all_clause ::= ALL |\n opt_sort_clause ::= sort_clause |\n sort_clause ::= ORDER BY sortby_list\n sortby_list ::= sortby | sortby_list ',' sortby\n sortby ::= a_expr USING qual_all_Op opt_nulls_order | a_expr\n opt_asc_desc opt_nulls_order\n select_limit ::= limit_clause offset_clause | offset_clause\n limit_clause | limit_clause | offset_clause\n opt_select_limit ::= select_limit |\n limit_clause ::= LIMIT select_limit_value | LIMIT\n select_limit_value ',' select_offset_value | FETCH first_or_next\n select_fetch_first_value row_or_rows ONLY | FETCH first_or_next\n select_fetch_first_value row_or_rows WITH TIES | FETCH\n first_or_next row_or_rows ONLY | FETCH 
first_or_next row_or_rows\n WITH TIES\n offset_clause ::= OFFSET select_offset_value | OFFSET\n select_fetch_first_value row_or_rows\n select_limit_value ::= a_expr | ALL\n select_offset_value ::= a_expr\n select_fetch_first_value ::= c_expr | '+' I_or_F_const | '-'\n I_or_F_const\n I_or_F_const ::= Iconst | FCONST\n row_or_rows ::= ROW | ROWS\n first_or_next ::= FIRST_P | NEXT\n group_clause ::= GROUP_P BY group_by_list |\n group_by_list ::= group_by_item | group_by_list ','\n group_by_item\n group_by_item ::= a_expr | empty_grouping_set | cube_clause |\n rollup_clause | grouping_sets_clause\n empty_grouping_set ::= '(' ')'\n rollup_clause ::= ROLLUP '(' expr_list ')'\n cube_clause ::= CUBE '(' expr_list ')'\n grouping_sets_clause ::= GROUPING SETS '(' group_by_list ')'\n having_clause ::= HAVING a_expr |\n for_locking_clause ::= for_locking_items | FOR READ ONLY\n opt_for_locking_clause ::= for_locking_clause |\n for_locking_items ::= for_locking_item | for_locking_items\n for_locking_item\n for_locking_item ::= for_locking_strength locked_rels_list\n opt_nowait_or_skip\n for_locking_strength ::= FOR UPDATE | FOR NO KEY UPDATE | FOR\n SHARE | FOR KEY SHARE\n locked_rels_list ::= OF qualified_name_list |\n values_clause ::= VALUES '(' expr_list ')' | values_clause ','\n '(' expr_list ')'\n from_clause ::= FROM from_list |\n from_list ::= table_ref | from_list ',' table_ref\n table_ref ::= relation_expr opt_alias_clause | relation_expr\n opt_alias_clause tablesample_clause | func_table\n func_alias_clause | LATERAL_P func_table func_alias_clause |\n xmltable opt_alias_clause | LATERAL_P xmltable opt_alias_clause\n | select_with_parens opt_alias_clause | LATERAL_P\n select_with_parens opt_alias_clause | joined_table | '('\n joined_table ')' alias_clause\n joined_table ::= '(' joined_table ')' | table_ref CROSS JOIN\n table_ref | table_ref join_type JOIN table_ref join_qual |\n table_ref JOIN table_ref join_qual | table_ref NATURAL join_type\n JOIN table_ref | 
table_ref NATURAL JOIN table_ref\n alias_clause ::= AS ColId '(' name_list ')' | AS ColId | ColId\n '(' name_list ')' | ColId\n opt_alias_clause ::= alias_clause |\n func_alias_clause ::= alias_clause | AS '(' TableFuncElementList\n ')' | AS ColId '(' TableFuncElementList ')' | ColId '('\n TableFuncElementList ')' |\n join_type ::= FULL join_outer | LEFT join_outer | RIGHT\n join_outer | INNER_P\n join_outer ::= OUTER_P |\n join_qual ::= USING '(' name_list ')' | ON a_expr\n relation_expr ::= qualified_name | qualified_name '*' | ONLY\n qualified_name | ONLY '(' qualified_name ')'\n relation_expr_list ::= relation_expr | relation_expr_list ','\n relation_expr\n relation_expr_opt_alias ::= relation_expr | relation_expr ColId\n | relation_expr AS ColId\n tablesample_clause ::= TABLESAMPLE func_name '(' expr_list ')'\n opt_repeatable_clause\n opt_repeatable_clause ::= REPEATABLE '(' a_expr ')' |\n func_table ::= func_expr_windowless opt_ordinality | ROWS FROM\n '(' rowsfrom_list ')' opt_ordinality\n rowsfrom_item ::= func_expr_windowless opt_col_def_list\n rowsfrom_list ::= rowsfrom_item | rowsfrom_list ','\n rowsfrom_item\n opt_col_def_list ::= AS '(' TableFuncElementList ')' |\n opt_ordinality ::= WITH_LA ORDINALITY |\n where_clause ::= WHERE a_expr |\n where_or_current_clause ::= WHERE a_expr | WHERE CURRENT_P OF\n cursor_name |\n OptTableFuncElementList ::= TableFuncElementList |\n TableFuncElementList ::= TableFuncElement | TableFuncElementList\n ',' TableFuncElement\n TableFuncElement ::= ColId Typename opt_collate_clause\n xmltable ::= XMLTABLE '(' c_expr xmlexists_argument COLUMNS\n xmltable_column_list ')' | XMLTABLE '(' XMLNAMESPACES '('\n xml_namespace_list ')' ',' c_expr xmlexists_argument COLUMNS\n xmltable_column_list ')'\n xmltable_column_list ::= xmltable_column_el |\n xmltable_column_list ',' xmltable_column_el\n xmltable_column_el ::= ColId Typename | ColId Typename\n xmltable_column_option_list | ColId FOR ORDINALITY\n xmltable_column_option_list 
::= xmltable_column_option_el |\n xmltable_column_option_list xmltable_column_option_el\n xmltable_column_option_el ::= IDENT b_expr | DEFAULT b_expr |\n NOT NULL_P | NULL_P\n xml_namespace_list ::= xml_namespace_el | xml_namespace_list ','\n xml_namespace_el\n xml_namespace_el ::= b_expr AS ColLabel | DEFAULT b_expr\n Typename ::= SimpleTypename opt_array_bounds | SETOF\n SimpleTypename opt_array_bounds | SimpleTypename ARRAY '['\n Iconst ']' | SETOF SimpleTypename ARRAY '[' Iconst ']' |\n SimpleTypename ARRAY | SETOF SimpleTypename ARRAY\n opt_array_bounds ::= opt_array_bounds '[' ']' | opt_array_bounds\n '[' Iconst ']' |\n SimpleTypename ::= GenericType | Numeric | Bit | Character |\n ConstDatetime | ConstInterval opt_interval | ConstInterval '('\n Iconst ')'\n ConstTypename ::= Numeric | ConstBit | ConstCharacter |\n ConstDatetime\n GenericType ::= type_function_name opt_type_modifiers |\n type_function_name attrs opt_type_modifiers\n opt_type_modifiers ::= '(' expr_list ')' |\n Numeric ::= INT_P | INTEGER | SMALLINT | BIGINT | REAL | FLOAT_P\n opt_float | DOUBLE_P PRECISION | DECIMAL_P opt_type_modifiers |\n DEC opt_type_modifiers | NUMERIC opt_type_modifiers | BOOLEAN_P\n opt_float ::= '(' Iconst ')' |\n Bit ::= BitWithLength | BitWithoutLength\n ConstBit ::= BitWithLength | BitWithoutLength\n BitWithLength ::= BIT opt_varying '(' expr_list ')'\n BitWithoutLength ::= BIT opt_varying\n Character ::= CharacterWithLength | CharacterWithoutLength\n ConstCharacter ::= CharacterWithLength | CharacterWithoutLength\n CharacterWithLength ::= character '(' Iconst ')'\n CharacterWithoutLength ::= character\n character ::= CHARACTER opt_varying | CHAR_P opt_varying |\n VARCHAR | NATIONAL CHARACTER opt_varying | NATIONAL CHAR_P\n opt_varying | NCHAR opt_varying\n opt_varying ::= VARYING |\n ConstDatetime ::= TIMESTAMP '(' Iconst ')' opt_timezone |\n TIMESTAMP opt_timezone | TIME '(' Iconst ')' opt_timezone | TIME\n opt_timezone\n ConstInterval ::= INTERVAL\n opt_timezone 
::= WITH_LA TIME ZONE | WITHOUT TIME ZONE |\n opt_interval ::= YEAR_P | MONTH_P | DAY_P | HOUR_P | MINUTE_P |\n interval_second | YEAR_P TO MONTH_P | DAY_P TO HOUR_P | DAY_P TO\n MINUTE_P | DAY_P TO interval_second | HOUR_P TO MINUTE_P |\n HOUR_P TO interval_second | MINUTE_P TO interval_second |\n interval_second ::= SECOND_P | SECOND_P '(' Iconst ')'\n a_expr ::= c_expr | a_expr TYPECAST Typename | a_expr COLLATE\n any_name | a_expr AT TIME ZONE a_expr | '+' a_expr | '-' a_expr\n | a_expr '+' a_expr | a_expr '-' a_expr | a_expr '*' a_expr |\n a_expr '/' a_expr | a_expr '%' a_expr | a_expr '^' a_expr |\n a_expr '<' a_expr | a_expr '>' a_expr | a_expr '=' a_expr\n | a_expr LESS_EQUALS a_expr | a_expr GREATER_EQUALS a_expr |\n a_expr NOT_EQUALS a_expr | a_expr qual_Op a_expr | qual_Op\n a_expr | a_expr qual_Op | a_expr AND a_expr | a_expr OR a_expr |\n NOT a_expr | NOT_LA a_expr | a_expr LIKE a_expr | a_expr LIKE\n a_expr ESCAPE a_expr | a_expr NOT_LA LIKE a_expr | a_expr NOT_LA\n LIKE a_expr ESCAPE a_expr | a_expr ILIKE a_expr | a_expr ILIKE\n a_expr ESCAPE a_expr | a_expr NOT_LA ILIKE a_expr | a_expr\n NOT_LA ILIKE a_expr ESCAPE a_expr | a_expr SIMILAR TO a_expr |\n a_expr SIMILAR TO a_expr ESCAPE a_expr | a_expr NOT_LA SIMILAR\n TO a_expr | a_expr NOT_LA SIMILAR TO a_expr ESCAPE a_expr |\n a_expr IS NULL_P | a_expr ISNULL | a_expr IS NOT NULL_P | a_expr\n NOTNULL | row OVERLAPS row | a_expr IS TRUE_P | a_expr IS NOT\n TRUE_P | a_expr IS FALSE_P | a_expr IS NOT FALSE_P | a_expr IS\n UNKNOWN | a_expr IS NOT UNKNOWN | a_expr IS DISTINCT FROM a_expr\n | a_expr IS NOT DISTINCT FROM a_expr | a_expr IS OF '('\n type_list ')' | a_expr IS NOT OF '(' type_list ')' | a_expr\n BETWEEN opt_asymmetric b_expr AND a_expr | a_expr NOT_LA BETWEEN\n opt_asymmetric b_expr AND a_expr | a_expr BETWEEN SYMMETRIC\n b_expr AND a_expr | a_expr NOT_LA BETWEEN SYMMETRIC b_expr AND\n a_expr | a_expr IN_P in_expr | a_expr NOT_LA IN_P in_expr |\n a_expr subquery_Op sub_type select_with_parens 
| a_expr\n subquery_Op sub_type '(' a_expr ')' | UNIQUE select_with_parens\n | a_expr IS DOCUMENT_P | a_expr IS NOT DOCUMENT_P | a_expr IS\n NORMALIZED | a_expr IS unicode_normal_form NORMALIZED | a_expr\n IS NOT NORMALIZED | a_expr IS NOT unicode_normal_form NORMALIZED\n | DEFAULT\n b_expr ::= c_expr | b_expr TYPECAST Typename | '+' b_expr | '-'\n b_expr | b_expr '+' b_expr | b_expr '-' b_expr | b_expr '*'\n b_expr | b_expr '/' b_expr | b_expr '%' b_expr | b_expr '^'\n b_expr | b_expr '<' b_expr | b_expr '>' b_expr | b_expr\n '=' b_expr | b_expr LESS_EQUALS b_expr | b_expr GREATER_EQUALS\n b_expr | b_expr NOT_EQUALS b_expr | b_expr qual_Op b_expr |\n qual_Op b_expr | b_expr qual_Op | b_expr IS DISTINCT FROM b_expr\n | b_expr IS NOT DISTINCT FROM b_expr | b_expr IS OF '('\n type_list ')' | b_expr IS NOT OF '(' type_list ')' | b_expr IS\n DOCUMENT_P | b_expr IS NOT DOCUMENT_P\n c_expr ::= columnref | AexprConst | PARAM opt_indirection | '('\n a_expr ')' opt_indirection | case_expr | func_expr |\n select_with_parens | select_with_parens indirection | EXISTS\n select_with_parens | ARRAY select_with_parens | ARRAY array_expr\n | explicit_row | implicit_row | GROUPING '(' expr_list ')'\n func_application ::= func_name '(' ')' | func_name '('\n func_arg_list opt_sort_clause ')' | func_name '(' VARIADIC\n func_arg_expr opt_sort_clause ')' | func_name '(' func_arg_list\n ',' VARIADIC func_arg_expr opt_sort_clause ')' | func_name '('\n ALL func_arg_list opt_sort_clause ')' | func_name '(' DISTINCT\n func_arg_list opt_sort_clause ')' | func_name '(' '*' ')'\n func_expr ::= func_application within_group_clause filter_clause\n over_clause | func_expr_common_subexpr\n func_expr_windowless ::= func_application |\n func_expr_common_subexpr\n func_expr_common_subexpr ::= COLLATION FOR '(' a_expr ')' |\n CURRENT_DATE | CURRENT_TIME | CURRENT_TIME '(' Iconst ')' |\n CURRENT_TIMESTAMP | CURRENT_TIMESTAMP '(' Iconst ')' | LOCALTIME\n | LOCALTIME '(' Iconst ')' | LOCALTIMESTAMP | 
LOCALTIMESTAMP '('\n Iconst ')' | CURRENT_ROLE | CURRENT_USER | SESSION_USER | USER |\n CURRENT_CATALOG | CURRENT_SCHEMA | CAST '(' a_expr AS Typename\n ')' | EXTRACT '(' extract_list ')' | NORMALIZE '(' a_expr ')' |\n NORMALIZE '(' a_expr ',' unicode_normal_form ')' | OVERLAY '('\n overlay_list ')' | POSITION '(' position_list ')' | SUBSTRING\n '(' substr_list ')' | TREAT '(' a_expr AS Typename ')' | TRIM\n '(' BOTH trim_list ')' | TRIM '(' LEADING trim_list ')' | TRIM\n '(' TRAILING trim_list ')' | TRIM '(' trim_list ')' | NULLIF '('\n a_expr ',' a_expr ')' | COALESCE '(' expr_list ')' | GREATEST\n '(' expr_list ')' | LEAST '(' expr_list ')' | XMLCONCAT '('\n expr_list ')' | XMLELEMENT '(' NAME_P ColLabel ')' | XMLELEMENT\n '(' NAME_P ColLabel ',' xml_attributes ')' | XMLELEMENT '('\n NAME_P ColLabel ',' expr_list ')' | XMLELEMENT '(' NAME_P\n ColLabel ',' xml_attributes ',' expr_list ')' | XMLEXISTS '('\n c_expr xmlexists_argument ')' | XMLFOREST '(' xml_attribute_list\n ')' | XMLPARSE '(' document_or_content a_expr\n xml_whitespace_option ')' | XMLPI '(' NAME_P ColLabel ')' |\n XMLPI '(' NAME_P ColLabel ',' a_expr ')' | XMLROOT '(' a_expr\n ',' xml_root_version opt_xml_root_standalone ')' | XMLSERIALIZE\n '(' document_or_content a_expr AS SimpleTypename ')'\n xml_root_version ::= VERSION_P a_expr | VERSION_P NO VALUE_P\n opt_xml_root_standalone ::= ',' STANDALONE_P YES_P | ','\n STANDALONE_P NO | ',' STANDALONE_P NO VALUE_P |\n xml_attributes ::= XMLATTRIBUTES '(' xml_attribute_list ')'\n xml_attribute_list ::= xml_attribute_el | xml_attribute_list ','\n xml_attribute_el\n xml_attribute_el ::= a_expr AS ColLabel | a_expr\n document_or_content ::= DOCUMENT_P | CONTENT_P\n xml_whitespace_option ::= PRESERVE WHITESPACE_P | STRIP_P\n WHITESPACE_P |\n xmlexists_argument ::= PASSING c_expr | PASSING c_expr\n xml_passing_mech | PASSING xml_passing_mech c_expr | PASSING\n xml_passing_mech c_expr xml_passing_mech\n xml_passing_mech ::= BY REF | BY VALUE_P\n 
within_group_clause ::= WITHIN GROUP_P '(' sort_clause ')' |\n filter_clause ::= FILTER '(' WHERE a_expr ')' |\n window_clause ::= WINDOW window_definition_list |\n window_definition_list ::= window_definition |\n window_definition_list ',' window_definition\n window_definition ::= ColId AS window_specification\n over_clause ::= OVER window_specification | OVER ColId |\n window_specification ::= '(' opt_existing_window_name\n opt_partition_clause opt_sort_clause opt_frame_clause ')'\n opt_existing_window_name ::= ColId |\n opt_partition_clause ::= PARTITION BY expr_list |\n opt_frame_clause ::= RANGE frame_extent\n opt_window_exclusion_clause | ROWS frame_extent\n opt_window_exclusion_clause | GROUPS frame_extent\n opt_window_exclusion_clause |\n frame_extent ::= frame_bound | BETWEEN frame_bound AND\n frame_bound\n frame_bound ::= UNBOUNDED PRECEDING | UNBOUNDED FOLLOWING |\n CURRENT_P ROW | a_expr PRECEDING | a_expr FOLLOWING\n opt_window_exclusion_clause ::= EXCLUDE CURRENT_P ROW | EXCLUDE\n GROUP_P | EXCLUDE TIES | EXCLUDE NO OTHERS |\n row ::= ROW '(' expr_list ')' | ROW '(' ')' | '(' expr_list ','\n a_expr ')'\n explicit_row ::= ROW '(' expr_list ')' | ROW '(' ')'\n implicit_row ::= '(' expr_list ',' a_expr ')'\n sub_type ::= ANY | SOME | ALL\n all_Op ::= Op | MathOp\n MathOp ::= '+' | '-' | '*' | '/' | '%' | '^' | '<' | '>' |\n '=' | LESS_EQUALS | GREATER_EQUALS | NOT_EQUALS\n qual_Op ::= Op | OPERATOR '(' any_operator ')'\n qual_all_Op ::= all_Op | OPERATOR '(' any_operator ')'\n subquery_Op ::= all_Op | OPERATOR '(' any_operator ')' | LIKE |\n NOT_LA LIKE | ILIKE | NOT_LA ILIKE\n expr_list ::= a_expr | expr_list ',' a_expr\n func_arg_list ::= func_arg_expr | func_arg_list ','\n func_arg_expr\n func_arg_expr ::= a_expr | param_name COLON_EQUALS a_expr |\n param_name EQUALS_GREATER a_expr\n type_list ::= Typename | type_list ',' Typename\n array_expr ::= '[' expr_list ']' | '[' array_expr_list ']' | '['\n ']'\n array_expr_list ::= array_expr | 
array_expr_list ',' array_expr\n extract_list ::= extract_arg FROM a_expr |\n extract_arg ::= IDENT | YEAR_P | MONTH_P | DAY_P | HOUR_P |\n MINUTE_P | SECOND_P | Sconst\n unicode_normal_form ::= NFC | NFD | NFKC | NFKD\n overlay_list ::= a_expr overlay_placing substr_from substr_for |\n a_expr overlay_placing substr_from\n overlay_placing ::= PLACING a_expr\n position_list ::= b_expr IN_P b_expr |\n substr_list ::= a_expr substr_from substr_for | a_expr\n substr_for substr_from | a_expr substr_from | a_expr substr_for\n | expr_list |\n substr_from ::= FROM a_expr\n substr_for ::= FOR a_expr\n trim_list ::= a_expr FROM expr_list | FROM expr_list | expr_list\n in_expr ::= select_with_parens | '(' expr_list ')'\n case_expr ::= CASE case_arg when_clause_list case_default END_P\n when_clause_list ::= when_clause | when_clause_list when_clause\n when_clause ::= WHEN a_expr THEN a_expr\n case_default ::= ELSE a_expr |\n case_arg ::= a_expr |\n columnref ::= ColId | ColId indirection\n indirection_el ::= '.' attr_name | '.' 
'*' | '[' a_expr ']' |\n '[' opt_slice_bound ':' opt_slice_bound ']'\n opt_slice_bound ::= a_expr |\n indirection ::= indirection_el | indirection indirection_el\n opt_indirection ::= | opt_indirection indirection_el\n opt_asymmetric ::= ASYMMETRIC | /*empty*/\n opt_target_list ::= target_list |\n target_list ::= target_el | target_list ',' target_el\n target_el ::= a_expr AS ColLabel | a_expr IDENT | a_expr | '*'\n qualified_name_list ::= qualified_name | qualified_name_list ','\n qualified_name\n qualified_name ::= ColId | ColId indirection\n name_list ::= name | name_list ',' name\n name ::= ColId\n database_name ::= ColId\n access_method ::= ColId\n attr_name ::= ColLabel\n index_name ::= ColId\n file_name ::= Sconst\n func_name ::= type_function_name | ColId indirection\n AexprConst ::= Iconst | FCONST | Sconst | BCONST | XCONST |\n func_name Sconst | func_name '(' func_arg_list opt_sort_clause\n ')' Sconst | ConstTypename Sconst | ConstInterval Sconst\n opt_interval | ConstInterval '(' Iconst ')' Sconst | TRUE_P |\n FALSE_P | NULL_P\n Iconst ::= ICONST\n Sconst ::= SCONST\n SignedIconst ::= Iconst | '+' Iconst | '-' Iconst\n RoleId ::= RoleSpec\n RoleSpec ::= NonReservedWord | CURRENT_USER | SESSION_USER\n role_list ::= RoleSpec | role_list ',' RoleSpec\n ColId ::= IDENT | unreserved_keyword | col_name_keyword\n type_function_name ::= IDENT | unreserved_keyword |\n type_func_name_keyword\n NonReservedWord ::= IDENT | unreserved_keyword |\n col_name_keyword | type_func_name_keyword\n ColLabel ::= IDENT | unreserved_keyword | col_name_keyword |\n type_func_name_keyword | reserved_keyword\n unreserved_keyword ::= ABORT_P | ABSOLUTE_P | ACCESS | ACTION |\n ADD_P | ADMIN | AFTER | AGGREGATE | ALSO | ALTER | ALWAYS |\n ASSERTION | ASSIGNMENT | AT | ATTACH | ATTRIBUTE | BACKWARD |\n BEFORE | BEGIN_P | BY | CACHE | CALL | CALLED | CASCADE |\n CASCADED | CATALOG_P | CHAIN | CHARACTERISTICS | CHECKPOINT |\n CLASS | CLOSE | CLUSTER | COLUMNS | COMMENT | COMMENTS | 
COMMIT\n | COMMITTED | CONFIGURATION | CONFLICT | CONNECTION |\n CONSTRAINTS | CONTENT_P | CONTINUE_P | CONVERSION_P | COPY |\n COST | CSV | CUBE | CURRENT_P | CURSOR | CYCLE | DATA_P |\n DATABASE | DAY_P | DEALLOCATE | DECLARE | DEFAULTS | DEFERRED |\n DEFINER | DELETE_P | DELIMITER | DELIMITERS | DEPENDS | DETACH |\n DICTIONARY | DISABLE_P | DISCARD | DOCUMENT_P | DOMAIN_P |\n DOUBLE_P | DROP | EACH | ENABLE_P | ENCODING | ENCRYPTED |\n ENUM_P | ESCAPE | EVENT | EXCLUDE | EXCLUDING | EXCLUSIVE |\n EXECUTE | EXPLAIN | EXPRESSION | EXTENSION | EXTERNAL | FAMILY |\n FILTER | FIRST_P | FOLLOWING | FORCE | FORWARD | FUNCTION |\n FUNCTIONS | GENERATED | GLOBAL | GRANTED | GROUPS | HANDLER |\n HEADER_P | HOLD | HOUR_P | IDENTITY_P | IF_P | IMMEDIATE |\n IMMUTABLE | IMPLICIT_P | IMPORT_P | INCLUDE | INCLUDING |\n INCREMENT | INDEX | INDEXES | INHERIT | INHERITS | INLINE_P |\n INPUT_P | INSENSITIVE | INSERT | INSTEAD | INVOKER | ISOLATION |\n KEY | LABEL | LANGUAGE | LARGE_P | LAST_P | LEAKPROOF | LEVEL |\n LISTEN | LOAD | LOCAL | LOCATION | LOCK_P | LOCKED | LOGGED |\n MAPPING | MATCH | MATERIALIZED | MAXVALUE | METHOD | MINUTE_P |\n MINVALUE | MODE | MONTH_P | MOVE | NAME_P | NAMES | NEW | NEXT |\n NFC | NFD | NFKC | NFKD | NO | NORMALIZED | NOTHING | NOTIFY |\n NOWAIT | NULLS_P | OBJECT_P | OF | OFF | OIDS | OLD | OPERATOR |\n OPTION | OPTIONS | ORDINALITY | OTHERS | OVER | OVERRIDING |\n OWNED | OWNER | PARALLEL | PARSER | PARTIAL | PARTITION |\n PASSING | PASSWORD | PLANS | POLICY | PRECEDING | PREPARE |\n PREPARED | PRESERVE | PRIOR | PRIVILEGES | PROCEDURAL |\n PROCEDURE | PROCEDURES | PROGRAM | PUBLICATION | QUOTE | RANGE |\n READ | REASSIGN | RECHECK | RECURSIVE | REF | REFERENCING |\n REFRESH | REINDEX | RELATIVE_P | RELEASE | RENAME | REPEATABLE |\n REPLACE | REPLICA | RESET | RESTART | RESTRICT | RETURNS |\n REVOKE | ROLE | ROLLBACK | ROLLUP | ROUTINE | ROUTINES | ROWS |\n RULE | SAVEPOINT | SCHEMA | SCHEMAS | SCROLL | SEARCH | SECOND_P\n | SECURITY | SEQUENCE 
| SEQUENCES | SERIALIZABLE | SERVER |\n SESSION | SET | SETS | SHARE | SHOW | SIMPLE | SKIP | SNAPSHOT |\n SQL_P | STABLE | STANDALONE_P | START | STATEMENT | STATISTICS |\n STDIN | STDOUT | STORAGE | STORED | STRICT_P | STRIP_P |\n SUBSCRIPTION | SUPPORT | SYSID | SYSTEM_P | TABLES | TABLESPACE\n | TEMP | TEMPLATE | TEMPORARY | TEXT_P | TIES | TRANSACTION |\n TRANSFORM | TRIGGER | TRUNCATE | TRUSTED | TYPE_P | TYPES_P |\n UESCAPE | UNBOUNDED | UNCOMMITTED | UNENCRYPTED | UNKNOWN |\n UNLISTEN | UNLOGGED | UNTIL | UPDATE | VACUUM | VALID | VALIDATE\n | VALIDATOR | VALUE_P | VARYING | VERSION_P | VIEW | VIEWS |\n VOLATILE | WHITESPACE_P | WITHIN | WITHOUT | WORK | WRAPPER |\n WRITE | XML_P | YEAR_P | YES_P | ZONE\n col_name_keyword ::= BETWEEN | BIGINT | BIT | BOOLEAN_P | CHAR_P\n | CHARACTER | COALESCE | DEC | DECIMAL_P | EXISTS | EXTRACT |\n FLOAT_P | GREATEST | GROUPING | INOUT | INT_P | INTEGER |\n INTERVAL | LEAST | NATIONAL | NCHAR | NONE | NORMALIZE | NULLIF\n | NUMERIC | OUT_P | OVERLAY | POSITION | PRECISION | REAL | ROW\n | SETOF | SMALLINT | SUBSTRING | TIME | TIMESTAMP | TREAT | TRIM\n | VALUES | VARCHAR | XMLATTRIBUTES | XMLCONCAT | XMLELEMENT |\n XMLEXISTS | XMLFOREST | XMLNAMESPACES | XMLPARSE | XMLPI |\n XMLROOT | XMLSERIALIZE | XMLTABLE\n type_func_name_keyword ::= AUTHORIZATION | BINARY | COLLATION |\n CONCURRENTLY | CROSS | CURRENT_SCHEMA | FREEZE | FULL | ILIKE |\n INNER_P | IS | ISNULL | JOIN | LEFT | LIKE | NATURAL | NOTNULL |\n OUTER_P | OVERLAPS | RIGHT | SIMILAR | TABLESAMPLE | VERBOSE\n reserved_keyword ::= ALL | ANALYSE | ANALYZE | AND | ANY | ARRAY\n | AS | ASC | ASYMMETRIC | BOTH | CASE | CAST | CHECK | COLLATE |\n COLUMN | CONSTRAINT | CREATE | CURRENT_CATALOG | CURRENT_DATE |\n CURRENT_ROLE | CURRENT_TIME | CURRENT_TIMESTAMP | CURRENT_USER |\n DEFAULT | DEFERRABLE | DESC | DISTINCT | DO | ELSE | END_P |\n EXCEPT | FALSE_P | FETCH | FOR | FOREIGN | FROM | GRANT |\n GROUP_P | HAVING | IN_P | INITIALLY | INTERSECT | INTO |\n LATERAL_P | 
LEADING | LIMIT | LOCALTIME | LOCALTIMESTAMP | NOT |\n NULL_P | OFFSET | ON | ONLY | OR | ORDER | PLACING | PRIMARY |\n REFERENCES | RETURNING | SELECT | SESSION_USER | SOME |\n SYMMETRIC | TABLE | THEN | TO | TRAILING | TRUE_P | UNION |\n UNIQUE | USER | USING | VARIADIC | WHEN | WHERE | WINDOW | WITH\n\n // Tokens from postgresql-13.3/src/include/parser/kwlist.h\n\n ABORT_P ::= \"abort\"\n ABSOLUTE_P ::= \"absolute\"\n ACCESS ::= \"access\"\n ACTION ::= \"action\"\n ADD_P ::= \"add\"\n ADMIN ::= \"admin\"\n AFTER ::= \"after\"\n AGGREGATE ::= \"aggregate\"\n ALL ::= \"all\"\n ALSO ::= \"also\"\n ALTER ::= \"alter\"\n ALWAYS ::= \"always\"\n ANALYSE ::= \"analyse\"\n ANALYZE ::= \"analyze\"\n AND ::= \"and\"\n ANY ::= \"any\"\n ARRAY ::= \"array\"\n AS ::= \"as\"\n ASC ::= \"asc\"\n ASSERTION ::= \"assertion\"\n ASSIGNMENT ::= \"assignment\"\n ASYMMETRIC ::= \"asymmetric\"\n AT ::= \"at\"\n ATTACH ::= \"attach\"\n ATTRIBUTE ::= \"attribute\"\n AUTHORIZATION ::= \"authorization\"\n BACKWARD ::= \"backward\"\n BEFORE ::= \"before\"\n BEGIN_P ::= \"begin\"\n BETWEEN ::= \"between\"\n BIGINT ::= \"bigint\"\n BINARY ::= \"binary\"\n BIT ::= \"bit\"\n BOOLEAN_P ::= \"boolean\"\n BOTH ::= \"both\"\n BY ::= \"by\"\n CACHE ::= \"cache\"\n CALL ::= \"call\"\n CALLED ::= \"called\"\n CASCADE ::= \"cascade\"\n CASCADED ::= \"cascaded\"\n CASE ::= \"case\"\n CAST ::= \"cast\"\n CATALOG_P ::= \"catalog\"\n CHAIN ::= \"chain\"\n CHAR_P ::= \"char\"\n CHARACTER ::= \"character\"\n CHARACTERISTICS ::= \"characteristics\"\n CHECK ::= \"check\"\n CHECKPOINT ::= \"checkpoint\"\n CLASS ::= \"class\"\n CLOSE ::= \"close\"\n CLUSTER ::= \"cluster\"\n COALESCE ::= \"coalesce\"\n COLLATE ::= \"collate\"\n COLLATION ::= \"collation\"\n COLUMN ::= \"column\"\n COLUMNS ::= \"columns\"\n COMMENT ::= \"comment\"\n COMMENTS ::= \"comments\"\n COMMIT ::= \"commit\"\n COMMITTED ::= \"committed\"\n CONCURRENTLY ::= \"concurrently\"\n CONFIGURATION ::= \"configuration\"\n CONFLICT ::= 
\"conflict\"\n CONNECTION ::= \"connection\"\n CONSTRAINT ::= \"constraint\"\n CONSTRAINTS ::= \"constraints\"\n CONTENT_P ::= \"content\"\n CONTINUE_P ::= \"continue\"\n CONVERSION_P ::= \"conversion\"\n COPY ::= \"copy\"\n COST ::= \"cost\"\n CREATE ::= \"create\"\n CROSS ::= \"cross\"\n CSV ::= \"csv\"\n CUBE ::= \"cube\"\n CURRENT_P ::= \"current\"\n CURRENT_CATALOG ::= \"current_catalog\"\n CURRENT_DATE ::= \"current_date\"\n CURRENT_ROLE ::= \"current_role\"\n CURRENT_SCHEMA ::= \"current_schema\"\n CURRENT_TIME ::= \"current_time\"\n CURRENT_TIMESTAMP ::= \"current_timestamp\"\n CURRENT_USER ::= \"current_user\"\n CURSOR ::= \"cursor\"\n CYCLE ::= \"cycle\"\n DATA_P ::= \"data\"\n DATABASE ::= \"database\"\n DAY_P ::= \"day\"\n DEALLOCATE ::= \"deallocate\"\n DEC ::= \"dec\"\n DECIMAL_P ::= \"decimal\"\n DECLARE ::= \"declare\"\n DEFAULT ::= \"default\"\n DEFAULTS ::= \"defaults\"\n DEFERRABLE ::= \"deferrable\"\n DEFERRED ::= \"deferred\"\n DEFINER ::= \"definer\"\n DELETE_P ::= \"delete\"\n DELIMITER ::= \"delimiter\"\n DELIMITERS ::= \"delimiters\"\n DEPENDS ::= \"depends\"\n DESC ::= \"desc\"\n DETACH ::= \"detach\"\n DICTIONARY ::= \"dictionary\"\n DISABLE_P ::= \"disable\"\n DISCARD ::= \"discard\"\n DISTINCT ::= \"distinct\"\n DO ::= \"do\"\n DOCUMENT_P ::= \"document\"\n DOMAIN_P ::= \"domain\"\n DOUBLE_P ::= \"double\"\n DROP ::= \"drop\"\n EACH ::= \"each\"\n ELSE ::= \"else\"\n ENABLE_P ::= \"enable\"\n ENCODING ::= \"encoding\"\n ENCRYPTED ::= \"encrypted\"\n END_P ::= \"end\"\n ENUM_P ::= \"enum\"\n ESCAPE ::= \"escape\"\n EVENT ::= \"event\"\n EXCEPT ::= \"except\"\n EXCLUDE ::= \"exclude\"\n EXCLUDING ::= \"excluding\"\n EXCLUSIVE ::= \"exclusive\"\n EXECUTE ::= \"execute\"\n EXISTS ::= \"exists\"\n EXPLAIN ::= \"explain\"\n EXPRESSION ::= \"expression\"\n EXTENSION ::= \"extension\"\n EXTERNAL ::= \"external\"\n EXTRACT ::= \"extract\"\n FALSE_P ::= \"false\"\n FAMILY ::= \"family\"\n FETCH ::= \"fetch\"\n FILTER ::= \"filter\"\n FIRST_P ::= 
\"first\"\n FLOAT_P ::= \"float\"\n FOLLOWING ::= \"following\"\n FOR ::= \"for\"\n FORCE ::= \"force\"\n FOREIGN ::= \"foreign\"\n FORWARD ::= \"forward\"\n FREEZE ::= \"freeze\"\n FROM ::= \"from\"\n FULL ::= \"full\"\n FUNCTION ::= \"function\"\n FUNCTIONS ::= \"functions\"\n GENERATED ::= \"generated\"\n GLOBAL ::= \"global\"\n GRANT ::= \"grant\"\n GRANTED ::= \"granted\"\n GREATEST ::= \"greatest\"\n GROUP_P ::= \"group\"\n GROUPING ::= \"grouping\"\n GROUPS ::= \"groups\"\n HANDLER ::= \"handler\"\n HAVING ::= \"having\"\n HEADER_P ::= \"header\"\n HOLD ::= \"hold\"\n HOUR_P ::= \"hour\"\n IDENTITY_P ::= \"identity\"\n IF_P ::= \"if\"\n ILIKE ::= \"ilike\"\n IMMEDIATE ::= \"immediate\"\n IMMUTABLE ::= \"immutable\"\n IMPLICIT_P ::= \"implicit\"\n IMPORT_P ::= \"import\"\n IN_P ::= \"in\"\n INCLUDE ::= \"include\"\n INCLUDING ::= \"including\"\n INCREMENT ::= \"increment\"\n INDEX ::= \"index\"\n INDEXES ::= \"indexes\"\n INHERIT ::= \"inherit\"\n INHERITS ::= \"inherits\"\n INITIALLY ::= \"initially\"\n INLINE_P ::= \"inline\"\n INNER_P ::= \"inner\"\n INOUT ::= \"inout\"\n INPUT_P ::= \"input\"\n INSENSITIVE ::= \"insensitive\"\n INSERT ::= \"insert\"\n INSTEAD ::= \"instead\"\n INT_P ::= \"int\"\n INTEGER ::= \"integer\"\n INTERSECT ::= \"intersect\"\n INTERVAL ::= \"interval\"\n INTO ::= \"into\"\n INVOKER ::= \"invoker\"\n IS ::= \"is\"\n ISNULL ::= \"isnull\"\n ISOLATION ::= \"isolation\"\n JOIN ::= \"join\"\n KEY ::= \"key\"\n LABEL ::= \"label\"\n LANGUAGE ::= \"language\"\n LARGE_P ::= \"large\"\n LAST_P ::= \"last\"\n LATERAL_P ::= \"lateral\"\n LEADING ::= \"leading\"\n LEAKPROOF ::= \"leakproof\"\n LEAST ::= \"least\"\n LEFT ::= \"left\"\n LEVEL ::= \"level\"\n LIKE ::= \"like\"\n LIMIT ::= \"limit\"\n LISTEN ::= \"listen\"\n LOAD ::= \"load\"\n LOCAL ::= \"local\"\n LOCALTIME ::= \"localtime\"\n LOCALTIMESTAMP ::= \"localtimestamp\"\n LOCATION ::= \"location\"\n LOCK_P ::= \"lock\"\n LOCKED ::= \"locked\"\n LOGGED ::= \"logged\"\n MAPPING ::= 
\"mapping\"\n MATCH ::= \"match\"\n MATERIALIZED ::= \"materialized\"\n MAXVALUE ::= \"maxvalue\"\n METHOD ::= \"method\"\n MINUTE_P ::= \"minute\"\n MINVALUE ::= \"minvalue\"\n MODE ::= \"mode\"\n MONTH_P ::= \"month\"\n MOVE ::= \"move\"\n NAME_P ::= \"name\"\n NAMES ::= \"names\"\n NATIONAL ::= \"national\"\n NATURAL ::= \"natural\"\n NCHAR ::= \"nchar\"\n NEW ::= \"new\"\n NEXT ::= \"next\"\n NFC ::= \"nfc\"\n NFD ::= \"nfd\"\n NFKC ::= \"nfkc\"\n NFKD ::= \"nfkd\"\n NO ::= \"no\"\n NONE ::= \"none\"\n NORMALIZE ::= \"normalize\"\n NORMALIZED ::= \"normalized\"\n NOT ::= \"not\"\n NOTHING ::= \"nothing\"\n NOTIFY ::= \"notify\"\n NOTNULL ::= \"notnull\"\n NOWAIT ::= \"nowait\"\n NULL_P ::= \"null\"\n NULLIF ::= \"nullif\"\n NULLS_P ::= \"nulls\"\n NUMERIC ::= \"numeric\"\n OBJECT_P ::= \"object\"\n OF ::= \"of\"\n OFF ::= \"off\"\n OFFSET ::= \"offset\"\n OIDS ::= \"oids\"\n OLD ::= \"old\"\n ON ::= \"on\"\n ONLY ::= \"only\"\n OPERATOR ::= \"operator\"\n OPTION ::= \"option\"\n OPTIONS ::= \"options\"\n OR ::= \"or\"\n ORDER ::= \"order\"\n ORDINALITY ::= \"ordinality\"\n OTHERS ::= \"others\"\n OUT_P ::= \"out\"\n OUTER_P ::= \"outer\"\n OVER ::= \"over\"\n OVERLAPS ::= \"overlaps\"\n OVERLAY ::= \"overlay\"\n OVERRIDING ::= \"overriding\"\n OWNED ::= \"owned\"\n OWNER ::= \"owner\"\n PARALLEL ::= \"parallel\"\n PARSER ::= \"parser\"\n PARTIAL ::= \"partial\"\n PARTITION ::= \"partition\"\n PASSING ::= \"passing\"\n PASSWORD ::= \"password\"\n PLACING ::= \"placing\"\n PLANS ::= \"plans\"\n POLICY ::= \"policy\"\n POSITION ::= \"position\"\n PRECEDING ::= \"preceding\"\n PRECISION ::= \"precision\"\n PREPARE ::= \"prepare\"\n PREPARED ::= \"prepared\"\n PRESERVE ::= \"preserve\"\n PRIMARY ::= \"primary\"\n PRIOR ::= \"prior\"\n PRIVILEGES ::= \"privileges\"\n PROCEDURAL ::= \"procedural\"\n PROCEDURE ::= \"procedure\"\n PROCEDURES ::= \"procedures\"\n PROGRAM ::= \"program\"\n PUBLICATION ::= \"publication\"\n QUOTE ::= \"quote\"\n RANGE ::= \"range\"\n READ 
::= \"read\"\n REAL ::= \"real\"\n REASSIGN ::= \"reassign\"\n RECHECK ::= \"recheck\"\n RECURSIVE ::= \"recursive\"\n REF ::= \"ref\"\n REFERENCES ::= \"references\"\n REFERENCING ::= \"referencing\"\n REFRESH ::= \"refresh\"\n REINDEX ::= \"reindex\"\n RELATIVE_P ::= \"relative\"\n RELEASE ::= \"release\"\n RENAME ::= \"rename\"\n REPEATABLE ::= \"repeatable\"\n REPLACE ::= \"replace\"\n REPLICA ::= \"replica\"\n RESET ::= \"reset\"\n RESTART ::= \"restart\"\n RESTRICT ::= \"restrict\"\n RETURNING ::= \"returning\"\n RETURNS ::= \"returns\"\n REVOKE ::= \"revoke\"\n RIGHT ::= \"right\"\n ROLE ::= \"role\"\n ROLLBACK ::= \"rollback\"\n ROLLUP ::= \"rollup\"\n ROUTINE ::= \"routine\"\n ROUTINES ::= \"routines\"\n ROW ::= \"row\"\n ROWS ::= \"rows\"\n RULE ::= \"rule\"\n SAVEPOINT ::= \"savepoint\"\n SCHEMA ::= \"schema\"\n SCHEMAS ::= \"schemas\"\n SCROLL ::= \"scroll\"\n SEARCH ::= \"search\"\n SECOND_P ::= \"second\"\n SECURITY ::= \"security\"\n SELECT ::= \"select\"\n SEQUENCE ::= \"sequence\"\n SEQUENCES ::= \"sequences\"\n SERIALIZABLE ::= \"serializable\"\n SERVER ::= \"server\"\n SESSION ::= \"session\"\n SESSION_USER ::= \"session_user\"\n SET ::= \"set\"\n SETOF ::= \"setof\"\n SETS ::= \"sets\"\n SHARE ::= \"share\"\n SHOW ::= \"show\"\n SIMILAR ::= \"similar\"\n SIMPLE ::= \"simple\"\n SKIP ::= \"skip\"\n SMALLINT ::= \"smallint\"\n SNAPSHOT ::= \"snapshot\"\n SOME ::= \"some\"\n SQL_P ::= \"sql\"\n STABLE ::= \"stable\"\n STANDALONE_P ::= \"standalone\"\n START ::= \"start\"\n STATEMENT ::= \"statement\"\n STATISTICS ::= \"statistics\"\n STDIN ::= \"stdin\"\n STDOUT ::= \"stdout\"\n STORAGE ::= \"storage\"\n STORED ::= \"stored\"\n STRICT_P ::= \"strict\"\n STRIP_P ::= \"strip\"\n SUBSCRIPTION ::= \"subscription\"\n SUBSTRING ::= \"substring\"\n SUPPORT ::= \"support\"\n SYMMETRIC ::= \"symmetric\"\n SYSID ::= \"sysid\"\n SYSTEM_P ::= \"system\"\n TABLE ::= \"table\"\n TABLES ::= \"tables\"\n TABLESAMPLE ::= \"tablesample\"\n TABLESPACE ::= 
\"tablespace\"\n TEMP ::= \"temp\"\n TEMPLATE ::= \"template\"\n TEMPORARY ::= \"temporary\"\n TEXT_P ::= \"text\"\n THEN ::= \"then\"\n TIES ::= \"ties\"\n TIME ::= \"time\"\n TIMESTAMP ::= \"timestamp\"\n TO ::= \"to\"\n TRAILING ::= \"trailing\"\n TRANSACTION ::= \"transaction\"\n TRANSFORM ::= \"transform\"\n TREAT ::= \"treat\"\n TRIGGER ::= \"trigger\"\n TRIM ::= \"trim\"\n TRUE_P ::= \"true\"\n TRUNCATE ::= \"truncate\"\n TRUSTED ::= \"trusted\"\n TYPE_P ::= \"type\"\n TYPES_P ::= \"types\"\n UESCAPE ::= \"uescape\"\n UNBOUNDED ::= \"unbounded\"\n UNCOMMITTED ::= \"uncommitted\"\n UNENCRYPTED ::= \"unencrypted\"\n UNION ::= \"union\"\n UNIQUE ::= \"unique\"\n UNKNOWN ::= \"unknown\"\n UNLISTEN ::= \"unlisten\"\n UNLOGGED ::= \"unlogged\"\n UNTIL ::= \"until\"\n UPDATE ::= \"update\"\n USER ::= \"user\"\n USING ::= \"using\"\n VACUUM ::= \"vacuum\"\n VALID ::= \"valid\"\n VALIDATE ::= \"validate\"\n VALIDATOR ::= \"validator\"\n VALUE_P ::= \"value\"\n VALUES ::= \"values\"\n VARCHAR ::= \"varchar\"\n VARIADIC ::= \"variadic\"\n VARYING ::= \"varying\"\n VERBOSE ::= \"verbose\"\n VERSION_P ::= \"version\"\n VIEW ::= \"view\"\n VIEWS ::= \"views\"\n VOLATILE ::= \"volatile\"\n WHEN ::= \"when\"\n WHERE ::= \"where\"\n WHITESPACE_P ::= \"whitespace\"\n WINDOW ::= \"window\"\n WITH ::= \"with\"\n WITHIN ::= \"within\"\n WITHOUT ::= \"without\"\n WORK ::= \"work\"\n WRAPPER ::= \"wrapper\"\n WRITE ::= \"write\"\n XML_P ::= \"xml\"\n XMLATTRIBUTES ::= \"xmlattributes\"\n XMLCONCAT ::= \"xmlconcat\"\n XMLELEMENT ::= \"xmlelement\"\n XMLEXISTS ::= \"xmlexists\"\n XMLFOREST ::= \"xmlforest\"\n XMLNAMESPACES ::= \"xmlnamespaces\"\n XMLPARSE ::= \"xmlparse\"\n XMLPI ::= \"xmlpi\"\n XMLROOT ::= \"xmlroot\"\n XMLSERIALIZE ::= \"xmlserialize\"\n XMLTABLE ::= \"xmltable\"\n YEAR_P ::= \"year\"\n YES_P ::= \"yes\"\n ZONE ::= \"zone\"\n\n====", "msg_date": "Sat, 3 Jul 2021 10:39:02 +0200", "msg_from": "Domingo Alvarez Duarte <mingodad@gmail.com>", "msg_from_op": true, 
"msg_subject": "Grammar railroad diagram" }, { "msg_contents": "On Sat, Jul 3, 2021 at 10:39:02AM +0200, Domingo Alvarez Duarte wrote:\n> I've done a experimental tool to convert bison grammars to a kind of EBNF\n> understood by https://www.bottlecaps.de/rr/ui to generate railroad diagrams see\n> bellow the converted 'postgresql-13.3/src/backend/parser/gram.y' and with some\n> hand made changes to allow view it at https://www.bottlecaps.de/rr/ui the order\n> of the rules could be changed to a better view of the railroad diagrams. Copy\n> and paste the EBNF bellow on https://www.bottlecaps.de/rr/ui tab Edit Grammar\n> then switch to the tab View Diagram.\n\nThat is pretty cool. I had trouble figuring out how to get it working,\nso here are the steps I used:\n\n\t1. save my attachment (created by Domingo)\n\t2. go to https://www.bottlecaps.de/rr/ui\n\t3. select \"Edit Grammar\"\n\t4. choose \"Browse\" at the bottom\n\t5. select the attachment you saved in #1\n\t6. choose \"Load\" at the bottom\n\t7. select \"View Diagram\"\n\nYou can even click on the yellow boxes to see the sub-grammar. 
People\nhave asked for railroad diagrams in the past, and this certainly\nproduces them, and \"Options\" allows many customizations.\n\nI tried downloading as XHTML+SVG and HTML+PNG but got an error:\n\n\tHTTP Status 500 – Internal Server Error\n\n\tType Exception Report\n\t\n\tMessage The multi-part request contained parameter data (excluding\n\tuploaded files) that exceeded the limit for maxPostSize set on the\n\tassociated connector\n\t\n\tDescription The server encountered an unexpected condition that\n\tprevented it from fulfilling the request.\n\nIt might be nice to download this output and host it on the Postgres\nwebsite at some point.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.", "msg_date": "Tue, 6 Jul 2021 12:51:54 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: Grammar railroad diagram" }, { "msg_contents": "Hello Bruce !\n\nYou can download the railroad generator to generate offline using Java \nhere -> https://www.bottlecaps.de/rr/download/rr-1.63-java8.zip (link \nfrom the https://www.bottlecaps.de/rr/ui on tab Welcome).\n\njava -jar rr.war -out:Dafny.atg.xhtml grammar.txt\n\nCheers !\n\nOn 6/7/21 18:51, Bruce Momjian wrote:\n> On Sat, Jul 3, 2021 at 10:39:02AM +0200, Domingo Alvarez Duarte wrote:\n>> I've done a experimental tool to convert bison grammars to a kind of EBNF\n>> understood by�https://www.bottlecaps.de/rr/ui�to generate railroad diagrams see\n>> bellow the converted 'postgresql-13.3/src/backend/parser/gram.y' and with some\n>> hand made changes to allow view it at�https://www.bottlecaps.de/rr/ui�the order\n>> of the rules could be changed to a better view of the railroad diagrams. Copy\n>> and paste the EBNF bellow on�https://www.bottlecaps.de/rr/ui�tab Edit Grammar\n>> then switch to the tab View Diagram.\n> That is pretty cool. 
I had trouble figuring out how to get it working,\n> so here are the steps I used:\n>\n> \t1. save my attachment (created by Domingo)\n> \t2. go to https://www.bottlecaps.de/rr/ui\n> \t3. select \"Edit Grammar\"\n> \t4. choose \"Browse\" at the bottom\n> \t5. select the attachment you saved in #1\n> \t6. choose \"Load\" at the bottom\n> \t7. select \"View Diagram\"\n>\n> You can even click on the yellow boxes to see the sub-grammar. People\n> have asked for railroad diagrams in the past, and this certainly\n> produces them, and \"Options\" allows many customizations.\n>\n> I tried downloading as XHTML+SVG and HTML+PNG but got an error:\n>\n> \tHTTP Status 500 � Internal Server Error\n>\n> \tType Exception Report\n> \t\n> \tMessage The multi-part request contained parameter data (excluding\n> \tuploaded files) that exceeded the limit for maxPostSize set on the\n> \tassociated connector\n> \t\n> \tDescription The server encountered an unexpected condition that\n> \tprevented it from fulfilling the request.\n>\n> It might be nice to download this output and host it on the Postgres\n> website at some point.\n>\n\n\n", "msg_date": "Tue, 6 Jul 2021 18:57:22 +0200", "msg_from": "Domingo Alvarez Duarte <mingodad@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Grammar railroad diagram" }, { "msg_contents": "Hi,\n\nOn 2021-07-03 10:39:02 +0200, Domingo Alvarez Duarte wrote:\n> I've done a experimental tool to convert bison grammars to a kind of EBNF\n> understood by https://www.bottlecaps.de/rr/ui\n\nIt'd be nice if you could share that tool. 
The diagrams this can generate\nare neat...\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Thu, 8 Jul 2021 19:36:41 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: Grammar railroad diagram" }, { "msg_contents": "Hello Andres !\n\nThere is two ways to achieve it:\n\n-1 I just add the bison grammar in CocoR format here \nhttps://github.com/mingodad/CocoR-CPP in the examples folder.\n\n-2 I created an small extension to Bison to do the same and published \nthe patch here \nhttps://github.com/mingodad/bison/commit/da84329ebe5f4bc111ef34b2d46088b655a217f3 \n(bison -e yourgramar.y)\n\nAnd optionally to have the best railroad diagram we need to add the \n\"Tokens\" manually.\n\nCheers !\n\nOn 9/7/21 4:36, Andres Freund wrote:\n> Hi,\n>\n> On 2021-07-03 10:39:02 +0200, Domingo Alvarez Duarte wrote:\n>> I've done a experimental tool to convert bison grammars to a kind of EBNF\n>> understood by https://www.bottlecaps.de/rr/ui\n> It'd be nice if you could share that tool. The diagrams this can generate\n> are neat...\n>\n> Greetings,\n>\n> Andres Freund\n\n\n", "msg_date": "Fri, 9 Jul 2021 09:56:29 +0200", "msg_from": "Domingo Alvarez Duarte <mingodad@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Grammar railroad diagram" }, { "msg_contents": "\nDomingo, Bruce,\n \nsorry for the error. It was caused by my server using Tomcat's default maxPostSize of 2MB, which\nis not sufficient for the diagrams generated from the Postgres grammar. I have now extended it\nto 10MB.\n \nBy the way, I had already created diagrams for PostgreSQL back in 2017, please find them here:\n\nhttps://cdn.rawgit.com/GuntherRademacher/1e5a275f433fdc61bc4e81e24c287d67/raw/7c8599f5d2bf8450c52750abd70bb4bc90369bf8/gram.xhtml\n \nAt the time, this answered a question on StackOverflow, but apparently that question has been\ndeleted since. 
Those diagrams were created by pasting the content of\n \nhttps://raw.githubusercontent.com/postgres/postgres/master/src/backend/parser/gram.y\n \nto\n \nhttps://bottlecaps.de/convert/\n \nthen clicking \"Convert\" and on the bottom of the result page, \"View Diagram\".\n \nDownloading diagrams now works after maxPostSize has been extended on my side.\n \nBest regards\nGunther\n \n\nGesendet: Dienstag, 06. Juli 2021 um 20:04 Uhr\nVon: \"Domingo Alvarez Duarte\" <mingodad@gmail.com>\nAn: grd@gmx.net\nBetreff: Fwd: Grammar railroad diagram\n\nHello Gunther !\nI've sent this to postgresql and they reported the error bellow.\nCheers !\n\n-------- Forwarded Message --------\n\n\n\nSubject:\nRe: Grammar railroad diagram\n\n\nDate:\nTue, 6 Jul 2021 12:51:54 -0400\n\n\nFrom:\nBruce Momjian <bruce@momjian.us>\n\n\nTo:\nDomingo Alvarez Duarte <mingodad@gmail.com>\n\n\nCC:\npgsql-hackers@lists.postgresql.org\n\n\n\n\n\nOn Sat, Jul 3, 2021 at 10:39:02AM +0200, Domingo Alvarez Duarte wrote:\n\nI've done a experimental tool to convert bison grammars to a kind of EBNF\nunderstood by https://www.bottlecaps.de/rr/ui to generate railroad diagrams see\nbellow the converted 'postgresql-13.3/src/backend/parser/gram.y' and with some\nhand made changes to allow view it at https://www.bottlecaps.de/rr/ui the order\nof the rules could be changed to a better view of the railroad diagrams. Copy\nand paste the EBNF bellow on https://www.bottlecaps.de/rr/ui tab Edit Grammar\nthen switch to the tab View Diagram.\n\n\n\nThat is pretty cool. I had trouble figuring out how to get it working,\nso here are the steps I used:\n\n1. save my attachment (created by Domingo)\n2. go to https://www.bottlecaps.de/rr/ui\n3. select \"Edit Grammar\"\n4. choose \"Browse\" at the bottom\n5. select the attachment you saved in #1\n6. choose \"Load\" at the bottom\n7. select \"View Diagram\"\n\nYou can even click on the yellow boxes to see the sub-grammar. 
People\nhave asked for railroad diagrams in the past, and this certainly\nproduces them, and \"Options\" allows many customizations.\n\nI tried downloading as XHTML+SVG and HTML+PNG but got an error:\n\nHTTP Status 500 – Internal Server Error\n\nType Exception Report\n\nMessage The multi-part request contained parameter data (excluding\nuploaded files) that exceeded the limit for maxPostSize set on the\nassociated connector\n\nDescription The server encountered an unexpected condition that\nprevented it from fulfilling the request.\n\nIt might be nice to download this output and host it on the Postgres\nwebsite at some point.\n \n--\n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n\n\n\n\n\n", "msg_date": "Fri, 9 Jul 2021 16:13:38 +0200", "msg_from": "grd@gmx.net", "msg_from_op": false, "msg_subject": "Re: Fwd: Grammar railroad diagram" }, { "msg_contents": "Hello Andres !\n\nAnother way that I tested and it's working is to use \nhttps://www.bottlecaps.de/convert/ paste the postgresql grammar there \nand press \"convert\" and after press \"view diagram\".\n\nAgain optionally manually add the Tokens to a better diagram !\n\n====\n\n// Tokens from postgresql-13.3/src/include/parser/kwlist.h\n\nABORT_P ::= \"abort\"\nABSOLUTE_P ::= \"absolute\"\nACCESS ::= \"access\"\nACTION ::= \"action\"\n\n...\n\n====\n\nCheers !\n\nOn 9/7/21 4:36, Andres Freund wrote:\n> Hi,\n>\n> On 2021-07-03 10:39:02 +0200, Domingo Alvarez Duarte wrote:\n>> I've done a experimental tool to convert bison grammars to a kind of EBNF\n>> understood by https://www.bottlecaps.de/rr/ui\n> It'd be nice if you could share that tool. 
The diagrams this can generate\n> are neat...\n>\n> Greetings,\n>\n> Andres Freund\n\n\n", "msg_date": "Fri, 9 Jul 2021 17:00:29 +0200", "msg_from": "Domingo Alvarez Duarte <mingodad@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Grammar railroad diagram" }, { "msg_contents": ">\n>\n> Another way that I tested and it's working is to use\n> https://www.bottlecaps.de/convert/ paste the postgresql grammar there\n> and press \"convert\" and after press \"view diagram\".\n>\n\nI tried this out and I'm pleased to see that one of the outputs is xhtml +\nSVG, because SVGs have hover-over tool-tips, which are an important aspect\nof accessibility, which was my major concern the last time a similar thing\nwas proposed [1].\n\n[1]\nhttps://www.postgresql.org/message-id/CAH2-Wzmfc+P3pC_u1DsgM3LawURzKx5PqZmxtGLgSXBf8gFs8A@mail.gmail.com\n\nAnother way that I tested and it's working is to use \nhttps://www.bottlecaps.de/convert/ paste the postgresql grammar there \nand press \"convert\" and after press \"view diagram\".I tried this out and I'm pleased to see that one of the outputs is xhtml + SVG, because SVGs have hover-over tool-tips, which are an important aspect of accessibility, which was my major concern the last time a similar thing was proposed [1].[1] https://www.postgresql.org/message-id/CAH2-Wzmfc+P3pC_u1DsgM3LawURzKx5PqZmxtGLgSXBf8gFs8A@mail.gmail.com", "msg_date": "Sat, 10 Jul 2021 02:40:03 -0400", "msg_from": "Corey Huinker <corey.huinker@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Grammar railroad diagram" } ]
[ { "msg_contents": "Hi,\nI was looking at :\nRelax transactional restrictions on ALTER TYPE ... ADD VALUE (redux).\n\nIn check_safe_enum_use():\n\n+ if (!TransactionIdIsInProgress(xmin) &&\n+ TransactionIdDidCommit(xmin))\n+ return;\n\nSince the condition would be true only when TransactionIdDidCommit()\nreturns true, I think the call to TransactionIdIsInProgress is not needed.\nIf transaction for xmin is committed, the transaction cannot be in progress\nat the same time.\n\nPlease see the simple patch for removing the redundant check.\n\nThanks", "msg_date": "Sat, 3 Jul 2021 18:45:12 -0700", "msg_from": "Zhihong Yu <zyu@yugabyte.com>", "msg_from_op": true, "msg_subject": "Removing redundant check for transaction in progress in\n check_safe_enum_use" }, { "msg_contents": "On Sun, 4 Jul 2021, 03:40 Zhihong Yu, <zyu@yugabyte.com> wrote:\n>\n> Hi,\n> I was looking at :\n> Relax transactional restrictions on ALTER TYPE ... ADD VALUE (redux).\n>\n> In check_safe_enum_use():\n>\n> + if (!TransactionIdIsInProgress(xmin) &&\n> + TransactionIdDidCommit(xmin))\n> + return;\n>\n> Since the condition would be true only when TransactionIdDidCommit() returns true, I think the call to TransactionIdIsInProgress is not needed.\n> If transaction for xmin is committed, the transaction cannot be in progress at the same time.\n\nI'm not sure that removing the !TransactionIdIsInProgress-check is\ncorrect. 
The comment in heapam_visibility.c:13 explains that we need\nto check TransactionIdIsInProgress before TransactionIdDidCommit in\nnon-MVCC snapshots, and I'm fairly certain that check_safe_enum_use()\nis not guaranteed to run only in MVCC snapshots (at least its\ndocumentation does not warn against non-MVCC snapshots).\n\nKind regards,\n\nMatthias van de Meent\n\n\n", "msg_date": "Tue, 6 Jul 2021 14:29:51 +0200", "msg_from": "Matthias van de Meent <boekewurm+postgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Removing redundant check for transaction in progress in\n check_safe_enum_use" } ]
[ { "msg_contents": "Here https://gist.github.com/mingodad/49291e0e9505522c66fcd3fcea4a939d I \nposted the postgresql-13.3/src/backend/parser/gram.y with positional \nreferences by named references that is supported by bison for some time now.\n\nIt was done with a custom script and some comments are missing, if there \nis any interest in accept it I could try work on it to include the \nmissing comments and a different code layout.\n\nIt compiles on ubuntu 18.04.\n\nI did a similar contribution here \nhttps://github.com/facebookincubator/CG-SQL/pull/6\n\nAnd here is snippet of how it looks like:\n\n====\n\nopt_all_clause:\n     ALL    { $opt_all_clause = NIL;}\n     | /*EMPTY*/    { $opt_all_clause = NIL; }\n         ;\n\nopt_sort_clause:\n     sort_clause    { $opt_sort_clause = $sort_clause;}\n     | /*EMPTY*/    { $opt_sort_clause = NIL; }\n         ;\n\nsort_clause:\n     ORDER BY sortby_list    { $sort_clause = $sortby_list; }\n         ;\n\nsortby_list:\n     sortby    { $sortby_list = list_make1($sortby); }\n     | sortby_list[rhs_1] ',' sortby    { $$ /* sortby_list */ = \nlappend($rhs_1, $sortby); }\n         ;\n\nsortby:\n     a_expr USING qual_all_Op opt_nulls_order    {\n                     $sortby = makeNode(SortBy);\n                     $sortby->node = $a_expr;\n                     $sortby->sortby_dir = SORTBY_USING;\n                     $sortby->sortby_nulls = $opt_nulls_order;\n                     $sortby->useOp = $qual_all_Op;\n                     $sortby->location = @qual_all_Op;\n                 }\n     | a_expr opt_asc_desc opt_nulls_order    {\n                     $sortby = makeNode(SortBy);\n                     $sortby->node = $a_expr;\n                     $sortby->sortby_dir = $opt_asc_desc;\n                     $sortby->sortby_nulls = $opt_nulls_order;\n                     $sortby->useOp = NIL;\n                     $sortby->location = -1;        /* no operator */\n                 }\n         ;\n\n====\n\nCheers !\n\n\n\n", 
"msg_date": "Sun, 4 Jul 2021 14:33:06 +0200", "msg_from": "Domingo Alvarez Duarte <mingodad@gmail.com>", "msg_from_op": true, "msg_subject": "PostgreSQL-13.3 parser.y with positional references by named\n references" }, { "msg_contents": "Domingo Alvarez Duarte <mingodad@gmail.com> writes:\n> Here https://gist.github.com/mingodad/49291e0e9505522c66fcd3fcea4a939d I \n> posted the postgresql-13.3/src/backend/parser/gram.y with positional \n> references by named references that is supported by bison for some time now.\n\nWhen is \"some time now\"?\n\nCurrently, we support bison versions back to 1.875. While we'd be\nwilling to raise that bar as soon as a good reason to do so comes\nalong, I'm not sure that getting rid of $N notation is a sufficient\nreason.\n\nIndeed, I'd say getting rid of $$ is a strict loss; the changes you\nshow make actions much more verbose but certainly not any more\nreadable. Having a special notation for a rule's output seems to me\nlike a good thing not a bad one. The examples of named notation in\nthe Bison docs don't seem like unconditional wins either; they're not\nvery concise, and the contortions you're forced into when the same\nnonterminal type is used more than once in a rule are just horrid.\n\nI do see the point about it being annoying to update $N references\nwhen a rule is changed. But this solution has enough downsides that\nI'm not sure it's a net win. 
Maybe if it were applied selectively,\nto just the longer DDL productions, it'd be worth doing?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 04 Jul 2021 11:58:27 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: PostgreSQL-13.3 parser.y with positional references by named\n references" }, { "msg_contents": "On 04.07.21 17:58, Tom Lane wrote:\n> Domingo Alvarez Duarte <mingodad@gmail.com> writes:\n>> Here https://gist.github.com/mingodad/49291e0e9505522c66fcd3fcea4a939d I\n>> posted the postgresql-13.3/src/backend/parser/gram.y with positional\n>> references by named references that is supported by bison for some time now.\n> \n> When is \"some time now\"?\n\nrelease 2.5 (2011-05-14)\n\n> I do see the point about it being annoying to update $N references\n> when a rule is changed. But this solution has enough downsides that\n> I'm not sure it's a net win. Maybe if it were applied selectively,\n> to just the longer DDL productions, it'd be worth doing?\n\nI agree that it should be applied selectively.\n\n\n", "msg_date": "Wed, 7 Jul 2021 10:28:38 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: PostgreSQL-13.3 parser.y with positional references by named\n references" }, { "msg_contents": "Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n\n> On 04.07.21 17:58, Tom Lane wrote:\n>> Domingo Alvarez Duarte <mingodad@gmail.com> writes:\n>>> Here https://gist.github.com/mingodad/49291e0e9505522c66fcd3fcea4a939d I\n>>> posted the postgresql-13.3/src/backend/parser/gram.y with positional\n>>> references by named references that is supported by bison for some time now.\n>>\n>> When is \"some time now\"?\n>\n> release 2.5 (2011-05-14)\n\nDo we support building on RHEL6? 
It only ships bison 2.4, so that would\nmean people building on that would have to install it seprately.\n\n- ilmari\n\n\n", "msg_date": "Wed, 07 Jul 2021 14:16:05 +0100", "msg_from": "ilmari@ilmari.org (Dagfinn Ilmari =?utf-8?Q?Manns=C3=A5ker?=)", "msg_from_op": false, "msg_subject": "Re: PostgreSQL-13.3 parser.y with positional references by named\n references" }, { "msg_contents": "ilmari@ilmari.org (Dagfinn Ilmari =?utf-8?Q?Manns=C3=A5ker?=) writes:\n> Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n>> On 04.07.21 17:58, Tom Lane wrote:\n>>> When is \"some time now\"?\n\n>> release 2.5 (2011-05-14)\n\n> Do we support building on RHEL6? It only ships bison 2.4, so that would\n> mean people building on that would have to install it seprately.\n\nA quick look through the buildfarm shows these animals that would be\nunhappy:\n\n sysname | snapshot | l gaur | 2021-07-03 22:56:25 | configure: using bison (GNU Bison) 1.875\n prairiedog | 2021-07-07 06:38:15 | configure: using bison (GNU Bison) 1.875\n locust | 2021-07-07 07:15:22 | configure: using bison (GNU Bison) 2.3\n longfin | 2021-07-07 04:39:09 | configure: using bison (GNU Bison) 2.3\n sifaka | 2021-07-07 04:33:58 | configure: using bison (GNU Bison) 2.3\n anole | 2021-07-01 15:50:38 | configure: using bison (GNU Bison) 2.4.1\n gharial | 2021-07-05 08:00:48 | configure: using bison (GNU Bison) 2.4.1\n walleye | 2021-07-07 06:55:35 | configure: using bison (GNU Bison) 2.4.2\n jacana | 2021-07-06 03:00:44 | Jul 05 23:00:49 configure: using bison (GNU Bison) 2.4.2\n\n(hmm, almost half of those are mine :-(). The main thing I take away\nfrom this is that Apple is still shipping 2.3, which means that requiring\n2.5 would completely break the ability to build on macOS without using\nanything from homebrew or macports. 
That seems like moving the goalposts\npretty far for a minor developer-convenience feature.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 07 Jul 2021 11:14:53 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: PostgreSQL-13.3 parser.y with positional references by named\n references" }, { "msg_contents": "I understand the concerns but I would not qualify it as \"minor \ndeveloper-convenience feature\".\n\nI'm not impartial because the initial suggestion was mine, just to add \nmore options to be considered:\n\nWhat if the generated parser/lexer be present in the tarball distributions ?\n\nCheers !\n\nOn 7/7/21 17:14, Tom Lane wrote:\n> ilmari@ilmari.org (Dagfinn Ilmari =?utf-8?Q?Manns=C3=A5ker?=) writes:\n>> Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n>>> On 04.07.21 17:58, Tom Lane wrote:\n>>>> When is \"some time now\"?\n>>> release 2.5 (2011-05-14)\n>> Do we support building on RHEL6? It only ships bison 2.4, so that would\n>> mean people building on that would have to install it seprately.\n> A quick look through the buildfarm shows these animals that would be\n> unhappy:\n>\n> sysname | snapshot | l gaur | 2021-07-03 22:56:25 | configure: using bison (GNU Bison) 1.875\n> prairiedog | 2021-07-07 06:38:15 | configure: using bison (GNU Bison) 1.875\n> locust | 2021-07-07 07:15:22 | configure: using bison (GNU Bison) 2.3\n> longfin | 2021-07-07 04:39:09 | configure: using bison (GNU Bison) 2.3\n> sifaka | 2021-07-07 04:33:58 | configure: using bison (GNU Bison) 2.3\n> anole | 2021-07-01 15:50:38 | configure: using bison (GNU Bison) 2.4.1\n> gharial | 2021-07-05 08:00:48 | configure: using bison (GNU Bison) 2.4.1\n> walleye | 2021-07-07 06:55:35 | configure: using bison (GNU Bison) 2.4.2\n> jacana | 2021-07-06 03:00:44 | Jul 05 23:00:49 configure: using bison (GNU Bison) 2.4.2\n>\n> (hmm, almost half of those are mine :-(). 
The main thing I take away\n> from this is that Apple is still shipping 2.3, which means that requiring\n> 2.5 would completely break the ability to build on macOS without using\n> anything from homebrew or macports. That seems like moving the goalposts\n> pretty far for a minor developer-convenience feature.\n>\n> \t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 7 Jul 2021 17:32:49 +0200", "msg_from": "Domingo Alvarez Duarte <mingodad@gmail.com>", "msg_from_op": true, "msg_subject": "Re: PostgreSQL-13.3 parser.y with positional references by named\n references" }, { "msg_contents": "Domingo Alvarez Duarte <mingodad@gmail.com> writes:\n> What if the generated parser/lexer be present in the tarball distributions ?\n\nIt is. The discussion here is about developer convenience (how painful\nis it to read or modify a rule) versus developer convenience (what hoops\nhave you got to jump through to install a version of Bison that will\nwork, when building from a git checkout). Note however that the set of\ndevelopers affected by the second aspect is much larger than those\naffected by the first. Lots of people who work on PG never mess with\nthe grammar.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 07 Jul 2021 12:05:34 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: PostgreSQL-13.3 parser.y with positional references by named\n references" } ]
[ { "msg_contents": "Over in [1] it is demonstrated that with CLOBBER_CACHE_ALWAYS enabled,\ninitdb accounts for a full 50% of the runtime of \"make check-world\"\n(well, actually of the buildfarm cycle, which is not quite exactly\nthat but close). Since initdb certainly doesn't cost that much\nnormally, I wondered why it is so negatively affected by CCA. Some\nperf measuring led me to LookupOpclassInfo, and specifically this bit:\n\n /*\n * When testing for cache-flush hazards, we intentionally disable the\n * operator class cache and force reloading of the info on each call. This\n * is helpful because we want to test the case where a cache flush occurs\n * while we are loading the info, and it's very hard to provoke that if\n * this happens only once per opclass per backend.\n */\n#ifdef CLOBBER_CACHE_ENABLED\n if (debug_invalidate_system_caches_always > 0)\n opcentry->valid = false;\n#endif\n\nDiking that out halves initdb's CCA runtime. Turns out it also\nroughly halves the runtime of the core regression tests under CCA,\nso this doesn't explain why initdb seems so disproportionately\naffected by CCA.\n\nHowever, seeing that this single choice is accounting for half the\ncost of CCA testing, we really have to ask whether it's worth that.\nThis code was added by my commit 03ffc4d6d of 2007-11-28, about which\nI wrote:\n\n Improve test coverage of CLOBBER_CACHE_ALWAYS by having it also force\n reloading of operator class information on each use of LookupOpclassInfo.\n Had this been in place a year ago, it would have helped me find a bug\n in the then-new 'operator family' code. Now that we have a build farm\n member testing CLOBBER_CACHE_ALWAYS on a regular basis, it seems worth\n expending a little bit of effort here.\n\nI'm now a little dubious about my claim that this would have helped find\nany bugs. 
Invalidating a finished OpClassCache entry does not model any\nreal-world scenario, because as noted elsewhere in LookupOpclassInfo,\nonce such a cache entry is populated it is kept for the rest of the\nsession. Also, the claim in the comment that we need this to test\na cache flush during load seems like nonsense: if we have\ndebug_invalidate_system_caches_always turned on, then we'll test\nthe effects of such flushes throughout the initial population of\na cache entry. Doing it over and over again adds nothing.\n\nSo I'm now fairly strongly tempted to remove this code outright\n(effectively reverting 03ffc4d6d). Another possibility now that\nwe have debug_invalidate_system_caches_always is to increase the\nthreshold at which this happens, making it more like\nCLOBBER_CACHE_RECURSIVE.\n\nThoughts?\n\n\t\t\tregards, tom lane\n\n[1] https://www.postgresql.org/message-id/1289102.1625353189%40sss.pgh.pa.us\n\n\n", "msg_date": "Sun, 04 Jul 2021 15:57:05 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Excessive cost of OpClassCache flushes in CLOBBER_CACHE_ALWAYS mode" }, { "msg_contents": "\nOn 7/4/21 3:57 PM, Tom Lane wrote:\n> Over in [1] it is demonstrated that with CLOBBER_CACHE_ALWAYS enabled,\n> initdb accounts for a full 50% of the runtime of \"make check-world\"\n> (well, actually of the buildfarm cycle, which is not quite exactly\n> that but close). Since initdb certainly doesn't cost that much\n> normally, I wondered why it is so negatively affected by CCA. Some\n> perf measuring led me to LookupOpclassInfo, and specifically this bit:\n>\n> /*\n> * When testing for cache-flush hazards, we intentionally disable the\n> * operator class cache and force reloading of the info on each call. 
This\n> * is helpful because we want to test the case where a cache flush occurs\n> * while we are loading the info, and it's very hard to provoke that if\n> * this happens only once per opclass per backend.\n> */\n> #ifdef CLOBBER_CACHE_ENABLED\n> if (debug_invalidate_system_caches_always > 0)\n> opcentry->valid = false;\n> #endif\n>\n> Diking that out halves initdb's CCA runtime. Turns out it also\n> roughly halves the runtime of the core regression tests under CCA,\n> so this doesn't explain why initdb seems so disproportionately\n> affected by CCA.\n>\n> However, seeing that this single choice is accounting for half the\n> cost of CCA testing, we really have to ask whether it's worth that.\n> This code was added by my commit 03ffc4d6d of 2007-11-28, about which\n> I wrote:\n>\n> Improve test coverage of CLOBBER_CACHE_ALWAYS by having it also force\n> reloading of operator class information on each use of LookupOpclassInfo.\n> Had this been in place a year ago, it would have helped me find a bug\n> in the then-new 'operator family' code. Now that we have a build farm\n> member testing CLOBBER_CACHE_ALWAYS on a regular basis, it seems worth\n> expending a little bit of effort here.\n>\n> I'm now a little dubious about my claim that this would have helped find\n> any bugs. Invalidating a finished OpClassCache entry does not model any\n> real-world scenario, because as noted elsewhere in LookupOpclassInfo,\n> once such a cache entry is populated it is kept for the rest of the\n> session. Also, the claim in the comment that we need this to test\n> a cache flush during load seems like nonsense: if we have\n> debug_invalidate_system_caches_always turned on, then we'll test\n> the effects of such flushes throughout the initial population of\n> a cache entry. Doing it over and over again adds nothing.\n>\n> So I'm now fairly strongly tempted to remove this code outright\n> (effectively reverting 03ffc4d6d). 
Another possibility now that\n> we have debug_invalidate_system_caches_always is to increase the\n> threshold at which this happens, making it more like\n> CLOBBER_CACHE_RECURSIVE.\n>\n> Thoughts?\n>\n> \t\t\t\n\n\nIf we don't think it's adding anything useful just rip it out. We don't\ngenerally keep code hanging around just on the off chance it might be\nuseful some day.\n\n\ncheers\n\n\nandrew\n\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Mon, 5 Jul 2021 07:32:08 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: Excessive cost of OpClassCache flushes in CLOBBER_CACHE_ALWAYS\n mode" }, { "msg_contents": "Andrew Dunstan <andrew@dunslane.net> writes:\n> On 7/4/21 3:57 PM, Tom Lane wrote:\n>> I'm now a little dubious about my claim that this would have helped find\n>> any bugs. Invalidating a finished OpClassCache entry does not model any\n>> real-world scenario, because as noted elsewhere in LookupOpclassInfo,\n>> once such a cache entry is populated it is kept for the rest of the\n>> session. Also, the claim in the comment that we need this to test\n>> a cache flush during load seems like nonsense: if we have\n>> debug_invalidate_system_caches_always turned on, then we'll test\n>> the effects of such flushes throughout the initial population of\n>> a cache entry. Doing it over and over again adds nothing.\n>> \n>> So I'm now fairly strongly tempted to remove this code outright\n>> (effectively reverting 03ffc4d6d). Another possibility now that\n>> we have debug_invalidate_system_caches_always is to increase the\n>> threshold at which this happens, making it more like\n>> CLOBBER_CACHE_RECURSIVE.\n\n> If we don't think it's adding anything useful just rip it out. 
We don't\n> generally keep code hanging around just on the off chance it might be\n> useful some day.\n\nI did a little more research about the origins of 03ffc4d6d.\nI found this thread:\n\nhttps://www.postgresql.org/message-id/flat/2988.1196271930%40sss.pgh.pa.us#a7dd1ce92f5470ba5ad2e1be03d40802\n\nThat points back to commit 0b56be834, which fixed the oversight that\nOpclassOidIndexId had not been marked as a critical system index,\nand claims that making LookupOpclassInfo invalidate entries would\nhave helped identify that.\n\nTo test that, I tried removing OpclassOidIndexId from the list of\ncritical system indexes. I found that (a) if I also remove the\ninvalidation code in LookupOpclassInfo, then things seem to proceed\nnormally even in CCA mode, but (b) with that code, we get into\ninfinite recursion, continually trying to rebuild that index's\nrelcache entry. Once criticalRelcachesBuilt is set, LookupOpclassInfo\nassumes it can use catalog index scans for all opclasses, even those\nneeded for indexes it's going to rely on, which is why the recursion\nhappens. This might arguably be a bug in that CLOBBER_CACHE_ENABLED\ncode itself: maybe it needs to avoid invalidating the entries for\nOID_BTREE_OPS_OID and INT2_BTREE_OPS_OID. If I make it do so, the\ninfinite recursion disappears even without OpclassOidIndexId as a\ncritical index.\n\nSo on the one hand maybe that logic is too simplistic, but on the\nother hand it might help identify missed critical indexes in future\ntoo. That puts it in the same category as the sorts of bugs that\n\"debug_invalidate_system_caches_always > 1\" is meant to help with,\nso now I'm content to change it that way.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 05 Jul 2021 12:46:04 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: Excessive cost of OpClassCache flushes in CLOBBER_CACHE_ALWAYS\n mode" } ]
[ { "msg_contents": "As I've been poking around in this area, I find myself growing\nincreasingly annoyed at the new GUC name\n\"debug_invalidate_system_caches_always\". It is too d*mn long.\nIt's a serious pain to type in any context where you don't have\nautocomplete to help you. I've kept referring to this type of\ntesting as CLOBBER_CACHE_ALWAYS testing, even though that name is\nnow obsolete, just because it's so much shorter. I think we need\nto reconsider this name while we still can.\n\nI do agree with the \"debug_\" prefix given that it's now visible to\nusers. However, it doesn't seem that hard to save some space in\nthe rest of the name. The word \"system\" is adding nothing of value,\nand the word \"always\" seems rather confusing --- if it does\nsomething \"always\", why is there more than one level? So a simple\nproposal is to rename it to \"debug_invalidate_caches\".\n\nHowever, I think we should also give serious consideration to\n\"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\nwith past practice (though it still feels like \"always\" is a good\nword to lose now). \"debug_clobber_caches\" is another reasonable\nvariant.\n\nThoughts?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 04 Jul 2021 16:27:13 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "\"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "On Sun, Jul 04, 2021 at 04:27:13PM -0400, Tom Lane wrote:\n> As I've been poking around in this area, I find myself growing\n> increasingly annoyed at the new GUC name\n> \"debug_invalidate_system_caches_always\". It is too d*mn long.\n> It's a serious pain to type in any context where you don't have\n> autocomplete to help you. I've kept referring to this type of\n> testing as CLOBBER_CACHE_ALWAYS testing, even though that name is\n> now obsolete, just because it's so much shorter. 
I think we need\n> to reconsider this name while we still can.\n> \n> I do agree with the \"debug_\" prefix given that it's now visible to\n> users. However, it doesn't seem that hard to save some space in\n> the rest of the name. The word \"system\" is adding nothing of value,\n> and the word \"always\" seems rather confusing --- if it does\n> something \"always\", why is there more than one level? So a simple\n> proposal is to rename it to \"debug_invalidate_caches\".\n\nI agree with all that. The word \"always\" has been misinformation, given the\nmultiple levels available.\n\n> However, I think we should also give serious consideration to\n> \"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\n> with past practice (though it still feels like \"always\" is a good\n> word to lose now). \"debug_clobber_caches\" is another reasonable\n> variant.\n\nhttps://en.wikipedia.org/wiki/Clobbering refers to cases where storage had no\nchanges to its accessibility but now contains different data. That doesn't\nmatch InvalidateSystemCaches() especially well, so I think dropping that word\nhas been a good step. Some other shorter terms could be debug_flush_caches,\ndebug_rebuild_caches, or debug_expire_caches. (debug_caches is tempting, but\nthat may ensnare folks looking for extra logging rather than a big slowdown.)\n\n\n", "msg_date": "Sun, 4 Jul 2021 14:12:34 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "On Sun, Jul 04, 2021 at 04:27:13PM -0400, Tom Lane wrote:\n> and the word \"always\" seems rather confusing --- if it does\n> something \"always\", why is there more than one level? 
So a simple\n> proposal is to rename it to \"debug_invalidate_caches\".\n\n+1 to remove \"always\"\n\n-- \nJustin\n\n\n", "msg_date": "Sun, 4 Jul 2021 17:22:59 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "At Sun, 4 Jul 2021 14:12:34 -0700, Noah Misch <noah@leadboat.com> wrote in \n> > However, I think we should also give serious consideration to\n> > \"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\n> > with past practice (though it still feels like \"always\" is a good\n> > word to lose now). \"debug_clobber_caches\" is another reasonable\n> > variant.\n> \n> https://en.wikipedia.org/wiki/Clobbering refers to cases where storage had no\n> changes to its accessibility but now contains different data. That doesn't\n> match InvalidateSystemCaches() especially well, so I think dropping that word\n> has been a good step. Some other shorter terms could be debug_flush_caches,\n> debug_rebuild_caches, or debug_expire_caches. (debug_caches is tempting, but\n\n(I murmur that I think \"drop\" is also usable here.)\n\n> that may ensnare folks looking for extra logging rather than a big slowdown.)\n\nI agree to this. (And one more +1 to removing \"always\".)\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Mon, 05 Jul 2021 13:52:46 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "On Mon, Jul 5, 2021 at 1:57 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> As I've been poking around in this area, I find myself growing\n> increasingly annoyed at the new GUC name\n> \"debug_invalidate_system_caches_always\". It is too d*mn long.\n> It's a serious pain to type in any context where you don't have\n> autocomplete to help you. 
I've kept referring to this type of\n> testing as CLOBBER_CACHE_ALWAYS testing, even though that name is\n> now obsolete, just because it's so much shorter. I think we need\n> to reconsider this name while we still can.\n>\n> I do agree with the \"debug_\" prefix given that it's now visible to\n> users. However, it doesn't seem that hard to save some space in\n> the rest of the name. The word \"system\" is adding nothing of value,\n> and the word \"always\" seems rather confusing --- if it does\n> something \"always\", why is there more than one level? So a simple\n> proposal is to rename it to \"debug_invalidate_caches\".\n>\n> However, I think we should also give serious consideration to\n> \"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\n> with past practice (though it still feels like \"always\" is a good\n> word to lose now). \"debug_clobber_caches\" is another reasonable\n> variant.\n>\n> Thoughts?\n\n +1 for the \"debug_clobber_caches\" variant, easy to remember.\n\nRegards,\nAmul\n\n\n", "msg_date": "Mon, 5 Jul 2021 11:03:13 +0530", "msg_from": "Amul Sul <sulamul@gmail.com>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "On Mon, Jul 5, 2021 at 1:57 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> As I've been poking around in this area, I find myself growing\n> increasingly annoyed at the new GUC name\n> \"debug_invalidate_system_caches_always\". It is too d*mn long.\n> It's a serious pain to type in any context where you don't have\n> autocomplete to help you. I've kept referring to this type of\n> testing as CLOBBER_CACHE_ALWAYS testing, even though that name is\n> now obsolete, just because it's so much shorter. I think we need\n> to reconsider this name while we still can.\n>\n> I do agree with the \"debug_\" prefix given that it's now visible to\n> users. However, it doesn't seem that hard to save some space in\n> the rest of the name. 
The word \"system\" is adding nothing of value,\n> and the word \"always\" seems rather confusing --- if it does\n> something \"always\", why is there more than one level? So a simple\n> proposal is to rename it to \"debug_invalidate_caches\".\n>\n> However, I think we should also give serious consideration to\n> \"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\n> with past practice (though it still feels like \"always\" is a good\n> word to lose now). \"debug_clobber_caches\" is another reasonable\n> variant.\n>\n> Thoughts?\n\n+1. IMO, debug_clobber_caches is better because it is simple. And\nalso, since the invalidation happens on multiple system caches,\ndebug_clobber_caches is preferable than debug_clobber_cache.\n\nRegards,\nBharath Rupireddy.\n\n\n", "msg_date": "Mon, 5 Jul 2021 11:48:17 +0530", "msg_from": "Bharath Rupireddy <bharath.rupireddyforpostgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "\nOn 7/4/21 4:27 PM, Tom Lane wrote:\n> As I've been poking around in this area, I find myself growing\n> increasingly annoyed at the new GUC name\n> \"debug_invalidate_system_caches_always\". It is too d*mn long.\n> It's a serious pain to type in any context where you don't have\n> autocomplete to help you. I've kept referring to this type of\n> testing as CLOBBER_CACHE_ALWAYS testing, even though that name is\n> now obsolete, just because it's so much shorter. I think we need\n> to reconsider this name while we still can.\n>\n> I do agree with the \"debug_\" prefix given that it's now visible to\n> users. However, it doesn't seem that hard to save some space in\n> the rest of the name. The word \"system\" is adding nothing of value,\n> and the word \"always\" seems rather confusing --- if it does\n> something \"always\", why is there more than one level? 
So a simple\n> proposal is to rename it to \"debug_invalidate_caches\".\n>\n> However, I think we should also give serious consideration to\n> \"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\n> with past practice (though it still feels like \"always\" is a good\n> word to lose now). \"debug_clobber_caches\" is another reasonable\n> variant.\n>\n\n\n+1 for debug_invalidate_caches - it seems to have the most content and\nleast noise. Second choice would be debug_clobber_caches.\n\n\ncheers\n\n\nandrew\n\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Mon, 5 Jul 2021 07:25:38 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "Noah Misch <noah@leadboat.com> writes:\n> On Sun, Jul 04, 2021 at 04:27:13PM -0400, Tom Lane wrote:\n>> However, I think we should also give serious consideration to\n>> \"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\n>> with past practice (though it still feels like \"always\" is a good\n>> word to lose now). \"debug_clobber_caches\" is another reasonable\n>> variant.\n\n> https://en.wikipedia.org/wiki/Clobbering refers to cases where storage had no\n> changes to its accessibility but now contains different data. That doesn't\n> match InvalidateSystemCaches() especially well, so I think dropping that word\n> has been a good step. Some other shorter terms could be debug_flush_caches,\n> debug_rebuild_caches, or debug_expire_caches. 
(debug_caches is tempting, but\n> that may ensnare folks looking for extra logging rather than a big slowdown.)\n\nI like \"debug_flush_caches\" --- it's short and accurate.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 05 Jul 2021 15:13:41 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "On Tue, Jul 6, 2021 at 12:43 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> Noah Misch <noah@leadboat.com> writes:\n> > On Sun, Jul 04, 2021 at 04:27:13PM -0400, Tom Lane wrote:\n> >> However, I think we should also give serious consideration to\n> >> \"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\n> >> with past practice (though it still feels like \"always\" is a good\n> >> word to lose now). \"debug_clobber_caches\" is another reasonable\n> >> variant.\n>\n> > https://en.wikipedia.org/wiki/Clobbering refers to cases where storage had no\n> > changes to its accessibility but now contains different data. That doesn't\n> > match InvalidateSystemCaches() especially well, so I think dropping that word\n> > has been a good step. Some other shorter terms could be debug_flush_caches,\n> > debug_rebuild_caches, or debug_expire_caches. (debug_caches is tempting, but\n> > that may ensnare folks looking for extra logging rather than a big slowdown.)\n>\n> I like \"debug_flush_caches\" --- it's short and accurate.\n\nDo we always flush the cache entries into the disk? Sometimes we just\ninvalidate the cache entries in the registered invalidation callbacks,\nright? 
Since we already use the term \"clobber\" in the user visible\nconfig option --clobber-cache, isn't it consistent to use\ndebug_clobber_caches?\n\nRegards,\nBharath Rupireddy.\n\n\n", "msg_date": "Tue, 6 Jul 2021 09:16:54 +0530", "msg_from": "Bharath Rupireddy <bharath.rupireddyforpostgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "\nOn 7/5/21 11:46 PM, Bharath Rupireddy wrote:\n> On Tue, Jul 6, 2021 at 12:43 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> Noah Misch <noah@leadboat.com> writes:\n>>> On Sun, Jul 04, 2021 at 04:27:13PM -0400, Tom Lane wrote:\n>>>> However, I think we should also give serious consideration to\n>>>> \"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\n>>>> with past practice (though it still feels like \"always\" is a good\n>>>> word to lose now). \"debug_clobber_caches\" is another reasonable\n>>>> variant.\n>>> https://en.wikipedia.org/wiki/Clobbering refers to cases where storage had no\n>>> changes to its accessibility but now contains different data. That doesn't\n>>> match InvalidateSystemCaches() especially well, so I think dropping that word\n>>> has been a good step. Some other shorter terms could be debug_flush_caches,\n>>> debug_rebuild_caches, or debug_expire_caches. (debug_caches is tempting, but\n>>> that may ensnare folks looking for extra logging rather than a big slowdown.)\n>> I like \"debug_flush_caches\" --- it's short and accurate.\n> Do we always flush the cache entries into the disk? Sometimes we just\n> invalidate the cache entries in the registered invalidation callbacks,\n> right? Since we already use the term \"clobber\" in the user visible\n> config option --clobber-cache, isn't it consistent to use\n> debug_clobber_caches?\n>\n\nI think 'flush' here means simply 'discard'. 
Maybe that would be a\nbetter word to use.\n\n\ncheers\n\n\nandrew\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Tue, 6 Jul 2021 05:50:14 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "Andrew Dunstan <andrew@dunslane.net> writes:\n> On 7/5/21 11:46 PM, Bharath Rupireddy wrote:\n>> On Tue, Jul 6, 2021 at 12:43 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>>> I like \"debug_flush_caches\" --- it's short and accurate.\n\n>> Do we always flush the cache entries into the disk? Sometimes we just\n>> invalidate the cache entries in the registered invalidation callbacks,\n>> right? Since we already use the term \"clobber\" in the user visible\n>> config option --clobber-cache, isn't it consistent to use\n>> debug_clobber_caches?\n\n> I think 'flush' here means simply 'discard'. Maybe that would be a\n> better word to use.\n\n\"Discard\" could be misinterpreted too, no doubt. None of these words\nhave one single exact meaning, so I have only limited patience for\nthis sort of argumentation.\n\n(As for initdb's \"--clobber-cache\", I'm assuming we'd rename that to\nmatch whatever we come up with here. It is what it is now only because\nI was unwilling to call it \"--use-debug-invalidate-system-caches-always\".)\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 06 Jul 2021 09:24:28 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "On 04.07.21 22:27, Tom Lane wrote:\n> I do agree with the \"debug_\" prefix given that it's now visible to\n> users. However, it doesn't seem that hard to save some space in\n> the rest of the name. The word \"system\" is adding nothing of value,\n> and the word \"always\" seems rather confusing --- if it does\n> something \"always\", why is there more than one level? 
So a simple\n> proposal is to rename it to \"debug_invalidate_caches\".\n\nI think we can definitely drop the \"always\". Not so much the \"system\", \nsince there are other caches, but it would be ok if we want it shorter.\n\n> However, I think we should also give serious consideration to\n> \"debug_clobber_cache\" or \"debug_clobber_cache_always\" for continuity\n> with past practice (though it still feels like \"always\" is a good\n> word to lose now). \"debug_clobber_caches\" is another reasonable\n> variant.\n\nThe clobbering doesn't actually happen unless you turn on \nCLOBBER_FREED_MEMORY, so it would be good to keep that separate.\n\n\n", "msg_date": "Wed, 7 Jul 2021 10:37:15 +0200", "msg_from": "Peter Eisentraut <peter.eisentraut@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n> The clobbering doesn't actually happen unless you turn on \n> CLOBBER_FREED_MEMORY, so it would be good to keep that separate.\n\nFair point. What do you think of the alternative proposals\n\"debug_flush_caches\", \"debug_discard_caches\", etc?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 07 Jul 2021 11:16:57 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "On Wed, Jul 7, 2021 at 11:17 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:\n> > The clobbering doesn't actually happen unless you turn on\n> > CLOBBER_FREED_MEMORY, so it would be good to keep that separate.\n>\n> Fair point. What do you think of the alternative proposals\n> \"debug_flush_caches\", \"debug_discard_caches\", etc?\n\nI like debug_discard_caches best. I have no preference between\ndebug_flush_caches and debug_clobber_caches; neither seems horrid. 
I\nagree that what we're doing here is not precisely a \"clobber\" in the\nusual sense, but the people who are apt to be using it will probably\nbe aware of that. Yet, it's good to try to clear things up for future\nhackers, and IMHO debug_discard_caches is the clearest, so that's why\nI like it a little better than the other choices.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Thu, 8 Jul 2021 13:33:28 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "Robert Haas <robertmhaas@gmail.com> writes:\n> On Wed, Jul 7, 2021 at 11:17 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> Fair point. What do you think of the alternative proposals\n>> \"debug_flush_caches\", \"debug_discard_caches\", etc?\n\n> I like debug_discard_caches best.\n\nI can live with that. Anyone strongly against it?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 08 Jul 2021 16:34:55 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "On 2021-Jul-08, Tom Lane wrote:\n\n> Robert Haas <robertmhaas@gmail.com> writes:\n> > On Wed, Jul 7, 2021 at 11:17 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> Fair point. What do you think of the alternative proposals\n> >> \"debug_flush_caches\", \"debug_discard_caches\", etc?\n> \n> > I like debug_discard_caches best.\n> \n> I can live with that. 
Anyone strongly against it?\n\nSeems fine to me.\n\n-- \nÁlvaro Herrera 39°49'30\"S 73°17'W — https://www.EnterpriseDB.com/\n\"Just treat us the way you want to be treated + some extra allowance\n for ignorance.\" (Michael Brusser)\n\n\n", "msg_date": "Thu, 8 Jul 2021 16:59:03 -0400", "msg_from": "Alvaro Herrera <alvherre@alvh.no-ip.org>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "On Thu, Jul 08, 2021 at 04:34:55PM -0400, Tom Lane wrote:\n> Robert Haas <robertmhaas@gmail.com> writes:\n> > On Wed, Jul 7, 2021 at 11:17 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> Fair point. What do you think of the alternative proposals\n> >> \"debug_flush_caches\", \"debug_discard_caches\", etc?\n> \n> > I like debug_discard_caches best.\n> \n> I can live with that. Anyone strongly against it?\n\nI like it.\n\n\n", "msg_date": "Fri, 9 Jul 2021 04:12:11 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" }, { "msg_contents": "Noah Misch <noah@leadboat.com> writes:\n> On Thu, Jul 08, 2021 at 04:34:55PM -0400, Tom Lane wrote:\n>> Robert Haas <robertmhaas@gmail.com> writes:\n>>> I like debug_discard_caches best.\n\n>> I can live with that. Anyone strongly against it?\n\n> I like it.\n\nHearing no votes against, here's a proposed patch for that.\n\n(This is for HEAD; I expect v14 will need additional adjustments\nin release-14.sgml)\n\n\t\t\tregards, tom lane", "msg_date": "Mon, 12 Jul 2021 14:49:12 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": true, "msg_subject": "Re: \"debug_invalidate_system_caches_always\" is too long" } ]
[ { "msg_contents": "Hi hackers,\r\n\r\nWe recently ran into an issue where the visibility map of a relation was corrupt, running Postgres 12.4. The error we'd get when running a SELECT * from this table is:\r\n\r\ncould not access status of transaction 3704450152\r\nDETAIL: Could not open file \"pg_xact/0DCC\": No such file or directory.\r\n\r\nOn the lists I could find several similar reports, but corruption like this could obviously have a very wide range of root causes.. see [1] [2] [3] for example - not all of them have their root cause known.\r\n\r\nThis particular case was similar to reported cases above, but also has some differences.\r\n\r\nThe following query returns ~21.000 rows, which indicates something inconsistent between the visibility map and the pd_all_visible flag on the page:\r\n\r\nselect * from pg_check_frozen('tbl');\r\n\r\nLooking at one of the affected pages with pageinspect:\r\n\r\n=# SELECT lp,lp_off,lp_flags,lp_len,t_xmin,t_xmax,t_field3,t_ctid,t_infomask2,t_infomask,t_hoff,t_oid FROM heap_page_items(get_raw_page('tbl', 726127));\r\n┌────┬────────┬──────────┬────────┬────────────┬────────────┬──────────┬────────────┬─────────────┬────────────┬────────┬───────┐\r\n│ lp │ lp_off │ lp_flags │ lp_len │ t_xmin │ t_xmax │ t_field3 │ t_ctid │ t_infomask2 │ t_infomask │ t_hoff │ t_oid │\r\n├────┼────────┼──────────┼────────┼────────────┼────────────┼──────────┼────────────┼─────────────┼────────────┼────────┼───────┤\r\n│ 1 │ 6328 │ 1 │ 1864 │ 3704450155 │ 3704450155 │ 1 │ (726127,1) │ 249 │ 8339 │ 56 │ ∅ │\r\n│ 2 │ 4464 │ 1 │ 1864 │ 3704450155 │ 3704450155 │ 1 │ (726127,2) │ 249 │ 8339 │ 56 │ ∅ │\r\n│ 3 │ 2600 │ 1 │ 1864 │ 3704450155 │ 3704450155 │ 1 │ (726127,3) │ 249 │ 8339 │ 56 │ ∅ │\r\n│ 4 │ 680 │ 1 │ 1920 │ 3704450155 │ 3704450155 │ 1 │ (726127,4) │ 249 │ 8339 │ 56 │ ∅ │\r\n└────┴────────┴──────────┴────────┴────────────┴────────────┴──────────┴────────────┴─────────────┴────────────┴────────┴───────┘\r\n\r\nt_infomask shows that 
HEAP_XMIN_COMMITTED and HEAP_XMIN_INVALID bits are both unset.\r\nThis pg_visibility() call shows the inconsistency between VM and page, with PD_ALL_VISIBLE=false\r\n\r\n=# select * from pg_visibility('tbl', 726127);\r\n┌─────────────┬────────────┬────────────────┐\r\n│ all_visible │ all_frozen │ pd_all_visible │\r\n├─────────────┼────────────┼────────────────┤\r\n│ t │ t │ f │\r\n└─────────────┴────────────┴────────────────┘\r\n\r\nLooking at other pages show the same information.\r\nWhat's interesting is that out of the affected tuples returned by pg_check_frozen, over 99% belong to 1 transaction (3704450155 as seen above) and the remaining few are from one other transaction that occurred at roughly the same time.\r\nI find it hard to believe that this is due to some random bit flipping, because many pages are affected, but the \"incorrect\" ones are in total only from two specific transactions which occurred at roughly the same time. There were also no server crashes or other known failures around the time of this transaction. I'm not ruling out any other kind of failure still, but I also cannot really explain how this could have happened.\r\n\r\nThe server has PG12.4 with full_page_writes=on, data_checksums=off. It's a large analytics database. The VM inconsistencies also occur on the streaming replicas.\r\n\r\nI realize these cases are pretty rare and hard to debug, but I wanted to share the information I found so far here for reference. Maybe someone has an idea what occurred, or maybe someone in the future finds it useful when he finds something similar.\r\n\r\nI have no idea how the inconsistency between VM and PD_ALL_VISIBLE started - from looking through the code I can't really find any way how this could occur. However, for it to lead to the problem described here, I believe there should be *no* SELECT that touches that particular page after the insert/update transaction and before the transaction log gets truncated. 
If the page is read before the transaction log gets truncated, then the hint bit HEAP_XMIN_COMMITTED will get set and future reads will succeed regardless of tx log truncation. One of the replica's had this happen to it: the affected pages are identical to the primary except that the HEAP_XMIN_COMMITTED flag is set (note that the VM inconsistency is still there on the replica though: PD_ALL_VISIBLE=false even though VM shows that all_frozen=all_visible=true). But I can query these rows on the replica without issues, because it doesn't check the tx log when it sees that HEAP_XMIN_COMMITTED is set.\r\n\r\n-Floris\r\n\r\n[1] https://postgrespro.com/list/thread-id/2422376\r\n[2] https://postgrespro.com/list/thread-id/2501800\r\n[3] https://postgrespro.com/list/thread-id/2321949\r\n\r\n", "msg_date": "Sun, 4 Jul 2021 20:43:50 +0000", "msg_from": "Floris Van Nee <florisvannee@Optiver.com>", "msg_from_op": true, "msg_subject": "visibility map corruption" }, { "msg_contents": "On Sun, Jul 4, 2021 at 1:44 PM Floris Van Nee <florisvannee@optiver.com> wrote:\n> We recently ran into an issue where the visibility map of a relation was corrupt, running Postgres 12.4. The error we'd get when running a SELECT * from this table is:\n>\n> could not access status of transaction 3704450152\n> DETAIL: Could not open file \"pg_xact/0DCC\": No such file or directory.\n\nHave you ever used pg_upgrade on this database?\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Sun, 4 Jul 2021 13:51:39 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "> On Sun, Jul 4, 2021 at 1:44 PM Floris Van Nee <florisvannee@optiver.com>\r\n> wrote:\r\n> > We recently ran into an issue where the visibility map of a relation was\r\n> corrupt, running Postgres 12.4. 
The error we'd get when running a SELECT *\r\n> from this table is:\r\n> >\r\n> > could not access status of transaction 3704450152\r\n> > DETAIL: Could not open file \"pg_xact/0DCC\": No such file or directory.\r\n> \r\n> Have you ever used pg_upgrade on this database?\r\n> \r\n\r\nYes. The last time (from v11 to v12) was in October 2020. The transaction id in the tuples (the one PG is trying to check in the tx log) dated from February 2021. I do believe (but am not 100% certain) that the affected table already existed at the time of the last pg_upgrade though.\r\n", "msg_date": "Sun, 4 Jul 2021 21:26:00 +0000", "msg_from": "Floris Van Nee <florisvannee@Optiver.com>", "msg_from_op": true, "msg_subject": "RE: visibility map corruption" }, { "msg_contents": "On Sun, Jul 4, 2021 at 2:26 PM Floris Van Nee <florisvannee@optiver.com> wrote:\n> > Have you ever used pg_upgrade on this database?\n> >\n>\n> Yes. The last time (from v11 to v12) was in October 2020. The transaction id in the tuples (the one PG is trying to check in the tx log) dated from February 2021. I do believe (but am not 100% certain) that the affected table already existed at the time of the last pg_upgrade though.\n\nI wonder if it's related to this issue:\n\nhttps://www.postgresql.org/message-id/20210423234256.hwopuftipdmp3okf@alap3.anarazel.de\n\nHave you increased autovacuum_freeze_max_age from its default? This\nalready sounds like the kind of database where that would make sense.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Sun, 4 Jul 2021 14:52:07 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "> \r\n> I wonder if it's related to this issue:\r\n> \r\n> https://www.postgresql.org/message-\r\n> id/20210423234256.hwopuftipdmp3okf@alap3.anarazel.de\r\n> \r\n> Have you increased autovacuum_freeze_max_age from its default? 
This\r\n> already sounds like the kind of database where that would make sense.\r\n> \r\n\r\nautovacuum_freeze_max_age is increased in our setup indeed (it is set to 500M). However, we do regularly run manual VACUUM (FREEZE) on individual tables in the database, including this one. A lot of tables in the database follow an INSERT-only pattern and since it's not running v13 yet, this meant that these tables would only rarely be touched by autovacuum. Autovacuum would sometimes kick in on some of these tables at the same time at unfortunate moments. Therefore we have some regular job running that VACUUM (FREEZE)s tables with a xact age higher than a (low, 10M) threshold ourselves.\r\n\r\n", "msg_date": "Sun, 4 Jul 2021 22:28:25 +0000", "msg_from": "Floris Van Nee <florisvannee@Optiver.com>", "msg_from_op": true, "msg_subject": "RE: visibility map corruption" }, { "msg_contents": "On Sun, Jul 4, 2021 at 10:28:25PM +0000, Floris Van Nee wrote:\n> >\n> > I wonder if it's related to this issue:\n> >\n> > https://www.postgresql.org/message-\n> > id/20210423234256.hwopuftipdmp3okf@alap3.anarazel.de\n> >\n> > Have you increased autovacuum_freeze_max_age from its default? This\n> > already sounds like the kind of database where that would make\n> > sense.\n> >\n>\n> autovacuum_freeze_max_age is increased in our setup indeed (it is\n> set to 500M). However, we do regularly run manual VACUUM (FREEZE)\n> on individual tables in the database, including this one. A lot of\n> tables in the database follow an INSERT-only pattern and since it's\n> not running v13 yet, this meant that these tables would only rarely\n> be touched by autovacuum. Autovacuum would sometimes kick in on some\n> of these tables at the same time at unfortunate moments. 
Therefore we\n> have some regular job running that VACUUM (FREEZE)s tables with a xact\n> age higher than a (low, 10M) threshold ourselves.\n\nOK, this is confirmation that the pg_resetwal bug, and its use by\npg_upgrade, is a serious issue that needs to be addressed. I am\nprepared to work on it now.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Tue, 6 Jul 2021 13:27:17 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 10:27 AM Bruce Momjian <bruce@momjian.us> wrote:\n> OK, this is confirmation that the pg_resetwal bug, and its use by\n> pg_upgrade, is a serious issue that needs to be addressed. I am\n> prepared to work on it now.\n\nTo be clear, I'm not 100% sure that this is related to the pg_upgrade\n+ \"pg_resetwal sets oldestXid to an invented value\" issue. I am sure\nthat that is a serious issue that needs to be addressed sooner rather\nthan later, though.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Tue, 6 Jul 2021 10:32:24 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 10:32:24AM -0700, Peter Geoghegan wrote:\n> On Tue, Jul 6, 2021 at 10:27 AM Bruce Momjian <bruce@momjian.us> wrote:\n> > OK, this is confirmation that the pg_resetwal bug, and its use by\n> > pg_upgrade, is a serious issue that needs to be addressed. I am\n> > prepared to work on it now.\n> \n> To be clear, I'm not 100% sure that this is related to the pg_upgrade\n> + \"pg_resetwal sets oldestXid to an invented value\" issue. I am sure\n> that that is a serious issue that needs to be addressed sooner rather\n> than later, though.\n\nWell, pg_upgrade corruptions are rare, but so is modifying\nautovacuum_freeze_max_age. 
If we have a corruption and we know\nautovacuum_freeze_max_age was modified, odds are that is the cause.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Tue, 6 Jul 2021 13:58:08 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 10:58 AM Bruce Momjian <bruce@momjian.us> wrote:\n> Well, pg_upgrade corruptions are rare, but so is modifying\n> autovacuum_freeze_max_age. If we have a corruption and we know\n> autovacuum_freeze_max_age was modified, odds are that is the cause.\n\nMy point is that there isn't necessarily that much use in trying to\ndetermine what really happened here. It would be nice to know for\nsure, but it shouldn't affect what we do about the bug.\n\nIn a way the situation with the bug is simple. Clearly Tom wasn't\nthinking of pg_upgrade when he wrote the relevant pg_resetwal code\nthat sets oldestXid, because pg_upgrade didn't exist at the time. He\nwas thinking of restoring the database to a relatively sane state in\nthe event of some kind of corruption, with all of the uncertainty that\ngoes with that. Nobody noticed that pg_upgrade gets this same behavior\nuntil much more recently.\n\nNow that we see the problem laid out, there isn't much to think about\nthat will affect the response to the issue. At least as far as I can\ntell. We know that pg_upgrade uses pg_resetwal's -x flag in a context\nwhere there is no reason at all to think that the database is corrupt\n-- Tom can't have anticipated that all those years ago. It's easy to\nsee that the behavior is wrong for pg_upgrade, and it's very hard to\nimagine any way in which it might have accidentally made some sense\nall along. 
We should just carry forward the original oldestXid.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Tue, 6 Jul 2021 11:36:45 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "Peter Geoghegan <pg@bowt.ie> writes:\n> ... We should just carry forward the original oldestXid.\n\nYup. It's a bit silly that we recognized the need to do that\nfor oldestMultiXid yet not for oldestXid.\n\nBTW, is it really necessary for copy_xact_xlog_xid to invoke pg_resetwal\nso many times? Why can't we pass all of the update-this options in one\ncall?\n\nWho's going to do the legwork on this?\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Tue, 06 Jul 2021 14:57:00 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "Hi,\n\nOn 7/6/21 8:57 PM, Tom Lane wrote:\n> Peter Geoghegan <pg@bowt.ie> writes:\n>> ... We should just carry forward the original oldestXid.\n> Yup. It's a bit silly that we recognized the need to do that\n> for oldestMultiXid yet not for oldestXid.\n\nFWIW there is a commitfest entry for it: \nhttps://commitfest.postgresql.org/33/3105/\n\nThanks\n\nBertrand\n\n\n\n", "msg_date": "Tue, 6 Jul 2021 21:33:35 +0200", "msg_from": "\"Drouvot, Bertrand\" <bdrouvot@amazon.com>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 11:57 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Peter Geoghegan <pg@bowt.ie> writes:\n> > ... We should just carry forward the original oldestXid.\n>\n> Yup. It's a bit silly that we recognized the need to do that\n> for oldestMultiXid yet not for oldestXid.\n\nTrue. But at the same time it somehow doesn't seem silly at all. IME\nsome of the most devious bugs evade detection by hiding in plain\nsight.\n\nIt looks like amcheck's verify_heapam.c functionality almost catches\nbugs like this one. 
Something for Mark (CC'd) to consider. Does it\nmatter that we usually \"ctx.oldest_xid = ctx.relfrozenxid\", and so\nusually use pg_class.relfrozenxid as our oldest_xid (and not\nShmemVariableCache->oldestXid)? In other words, could we be doing more\nto sanitize ShmemVariableCache->oldestXid, especially when the\nrelation's pg_class.relfrozenxid happens to be set to a real XID?\n\n> BTW, is it really necessary for copy_xact_xlog_xid to invoke pg_resetwal\n> so many times? Why can't we pass all of the update-this options in one\n> call?\n\nNo opinion here.\n\n> Who's going to do the legwork on this?\n\nCan Bruce take care of committing the patch for this? Bruce?\n\nThis should definitely be in the next point release IMV.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Tue, 6 Jul 2021 14:27:34 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "\n\n> On Jul 6, 2021, at 2:27 PM, Peter Geoghegan <pg@bowt.ie> wrote:\n> \n> It looks like amcheck's verify_heapam.c functionality almost catches\n> bugs like this one. Something for Mark (CC'd) to consider. Does it\n> matter that we usually \"ctx.oldest_xid = ctx.relfrozenxid\", and so\n> usually use pg_class.relfrozenxid as our oldest_xid (and not\n> ShmemVariableCache->oldestXid)? In other words, could we be doing more\n> to sanitize ShmemVariableCache->oldestXid, especially when the\n> relation's pg_class.relfrozenxid happens to be set to a real XID?\n\nThanks, Peter, for drawing my attention to this. 
I had already been following this thread, but had not yet thought about the problem in terms of amcheck.\n\nI will investigate possible solutions in verify_heapam().\n \n—\nMark Dilger\nEnterpriseDB: http://www.enterprisedb.com\nThe Enterprise PostgreSQL Company\n\n\n\n\n\n", "msg_date": "Tue, 6 Jul 2021 15:12:31 -0700", "msg_from": "Mark Dilger <mark.dilger@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 3:12 PM Mark Dilger <mark.dilger@enterprisedb.com> wrote:\n> Thanks, Peter, for drawing my attention to this. I had already been following this thread, but had not yet thought about the problem in terms of amcheck.\n>\n> I will investigate possible solutions in verify_heapam().\n\nThanks! Great that we might be able to make a whole class of bugs\ndetectable with the new amcheck stuff. Glad that I didn't forget about\namcheck myself -- I almost forgot.\n\nWhen I was working on the btree amcheck code, I looked for interesting\nhistorical bugs and made sure that I could detect them. That seems\neven more important with heapam. I wouldn't be surprised if one or two\nimportant invariants were missed, in part because the heapam design\ndidn't have invariants as a starting point.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Tue, 6 Jul 2021 15:20:23 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 02:27:34PM -0700, Peter Geoghegan wrote:\n> > BTW, is it really necessary for copy_xact_xlog_xid to invoke pg_resetwal\n> > so many times? Why can't we pass all of the update-this options in one\n> > call?\n> \n> No opinion here.\n> \n> > Who's going to do the legwork on this?\n> \n> Can Bruce take care of committing the patch for this? 
Bruce?\n> \n> This should definitely be in the next point release IMV.\n\nYes, I can, though it seems like a much bigger issue than pg_upgrade.\nI will be glad to dig into it.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Tue, 6 Jul 2021 18:30:41 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 3:30 PM Bruce Momjian <bruce@momjian.us> wrote:\n> Yes, I can, though it seems like a much bigger issue than pg_upgrade.\n> I will be glad to dig into it.\n\nI'm not sure what you mean by that. Technically this would be an issue\nfor any program that uses \"pg_resetwal -x\" in the way that pg_upgrade\ndoes, with those same expectations. But isn't pg_upgrade the only\nknown program that behaves like that?\n\nI don't see any reason why this wouldn't be treated as a pg_upgrade\nbug in the release notes, regardless of the exact nature or provenance\nof the issue -- the pg_upgrade framing seems useful because this is a\npractical problem for pg_upgrade users alone. Have I missed something?\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Tue, 6 Jul 2021 15:46:48 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 06:30:41PM -0400, Bruce Momjian wrote:\n> On Tue, Jul 6, 2021 at 02:27:34PM -0700, Peter Geoghegan wrote:\n> > > BTW, is it really necessary for copy_xact_xlog_xid to invoke pg_resetwal\n> > > so many times? Why can't we pass all of the update-this options in one\n> > > call?\n> > \n> > No opinion here.\n> > \n> > > Who's going to do the legwork on this?\n> > \n> > Can Bruce take care of committing the patch for this? 
Bruce?\n> > \n> > This should definitely be in the next point release IMV.\n> \n> Yes, I can, though it seems like a much bigger issue than pg_upgrade.\n> I will be glad to dig into it.\n\nBertrand Drouvot provided a patch in the thread, so I will start from\nthat; CC'ing him too.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Tue, 6 Jul 2021 18:47:19 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 03:46:48PM -0700, Peter Geoghegan wrote:\n> On Tue, Jul 6, 2021 at 3:30 PM Bruce Momjian <bruce@momjian.us> wrote:\n> > Yes, I can, though it seems like a much bigger issue than pg_upgrade.\n> > I will be glad to dig into it.\n> \n> I'm not sure what you mean by that. Technically this would be an issue\n> for any program that uses \"pg_resetwal -x\" in the way that pg_upgrade\n> does, with those same expectations. But isn't pg_upgrade the only\n> known program that behaves like that?\n> \n> I don't see any reason why this wouldn't be treated as a pg_upgrade\n> bug in the release notes, regardless of the exact nature or provenance\n> of the issue -- the pg_upgrade framing seems useful because this is a\n> practical problem for pg_upgrade users alone. Have I missed something?\n\nMy point is that there are a lot internals involved here that are not\npart of pg_upgrade, though it probably only affects pg_upgrade. 
Anyway,\nBertrand patch seems to have what I need.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Tue, 6 Jul 2021 18:49:10 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 3:49 PM Bruce Momjian <bruce@momjian.us> wrote:\n> My point is that there are a lot internals involved here that are not\n> part of pg_upgrade, though it probably only affects pg_upgrade. Anyway,\n> Bertrand patch seems to have what I need.\n\nI was confused by your remarks because I am kind of looking at it from\nthe opposite angle. At least now that I've thought about it a bit.\n\nSince the snippet of pg_resetwal code that sets oldestXid wasn't ever\nintended to be used by pg_upgrade, but was anyway, what we have is a\nsomething that's clearly totally wrong (at least in the pg_upgrade\ncase). It's not just wrong for pg_upgrade to do things that way --\nit's also wildly unreasonable. We heard a complaint about this from\nReddit only because it worked \"as designed\", and so made the cluster\nimmediately have an anti-wraparound autovacuum. But why would anybody\nwant that behavior, even if it was implemented correctly? It simply\nmakes no sense.\n\nThe consequences of this bug are indeed complicated and subtle and\nwill probably never be fully understood. But at the same time fixing\nthe bug now seems kind of simple. 
(Working backwards to arrive here\nwas a bit tricky, mind you.)\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Tue, 6 Jul 2021 16:20:53 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 06:49:10PM -0400, Bruce Momjian wrote:\n> On Tue, Jul 6, 2021 at 03:46:48PM -0700, Peter Geoghegan wrote:\n> > On Tue, Jul 6, 2021 at 3:30 PM Bruce Momjian <bruce@momjian.us> wrote:\n> > > Yes, I can, though it seems like a much bigger issue than pg_upgrade.\n> > > I will be glad to dig into it.\n> > \n> > I'm not sure what you mean by that. Technically this would be an issue\n> > for any program that uses \"pg_resetwal -x\" in the way that pg_upgrade\n> > does, with those same expectations. But isn't pg_upgrade the only\n> > known program that behaves like that?\n> > \n> > I don't see any reason why this wouldn't be treated as a pg_upgrade\n> > bug in the release notes, regardless of the exact nature or provenance\n> > of the issue -- the pg_upgrade framing seems useful because this is a\n> > practical problem for pg_upgrade users alone. Have I missed something?\n> \n> My point is that there are a lot internals involved here that are not\n> part of pg_upgrade, though it probably only affects pg_upgrade. Anyway,\n> Bertrand patch seems to have what I need.\n\nOne question is how do we want to handle cases where -x next_xid is used\nbut -u oldestXid is not used? Compute a value for oldestXid like we did\npreviously? Throw an error? Leave oldestXid unchanged? 
I am thinking\nthe last option.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Tue, 6 Jul 2021 20:36:13 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Tue, Jul 6, 2021 at 08:36:13PM -0400, Bruce Momjian wrote:\n> On Tue, Jul 6, 2021 at 06:49:10PM -0400, Bruce Momjian wrote:\n> > My point is that there are a lot internals involved here that are not\n> > part of pg_upgrade, though it probably only affects pg_upgrade. Anyway,\n> > Bertrand patch seems to have what I need.\n> \n> One question is how do we want to handle cases where -x next_xid is used\n> but -u oldestXid is not used? Compute a value for oldestXid like we did\n> previously? Throw an error? Leave oldestXid unchanged? I am thinking\n> the last option.\n\nHere is a modified version of Bertrand's patch, with docs, that does the\nlast option.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.", "msg_date": "Tue, 6 Jul 2021 21:49:03 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "Hi,\n\nOn 7/7/21 3:49 AM, Bruce Momjian wrote:\n> On Tue, Jul 6, 2021 at 08:36:13PM -0400, Bruce Momjian wrote:\n>> On Tue, Jul 6, 2021 at 06:49:10PM -0400, Bruce Momjian wrote:\n>>> My point is that there are a lot internals involved here that are not\n>>> part of pg_upgrade, though it probably only affects pg_upgrade. Anyway,\n>>> Bertrand patch seems to have what I need.\n>> One question is how do we want to handle cases where -x next_xid is used\n>> but -u oldestXid is not used? Compute a value for oldestXid like we did\n>> previously? Throw an error? Leave oldestXid unchanged? 
I am thinking\n>> the last option.\n> Here is a modified version of Bertrand's patch, with docs, that does the\n> last option.\n\nThanks for having looked at it.\n\nIt looks good to me, but i have one question:\n\n+��� printf(_(\"� -u, --oldest-transaction-id=XID� set oldest transaction \nID\\n\"));\n\nand\n\n+������������������ if (!TransactionIdIsNormal(set_oldest_xid))\n+������������������ {\n+����������������������� pg_log_error(\"oldest transaction ID (-u) must \nbe greater or equal to %u\", FirstNormalTransactionId);\n+����������������������� exit(1);\n+������������������ }\n\nI am wondering if we should not keep my original proposal \"oldest \nunfrozen transaction\" (as compare to \"oldest transaction\") in both \noutput to:\n\n- make the wording similar with what we can found in StartupXLOG():\n\n ��� ereport(DEBUG1,\n ����������� (errmsg_internal(\"oldest unfrozen transaction ID: %u, in \ndatabase %u\",\n ���������������������������� checkPoint.oldestXid, \ncheckPoint.oldestXidDB)));\n\n- give the new� \"-u\" a sense (somehow) from a naming point of view.\n\nWhat do you think?\n\nThanks\n\nBertrand\n\n\n\n", "msg_date": "Thu, 8 Jul 2021 07:35:58 +0200", "msg_from": "\"Drouvot, Bertrand\" <bdrouvot@amazon.com>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Thu, Jul 8, 2021 at 07:35:58AM +0200, Drouvot, Bertrand wrote:\n> Thanks for having looked at it.\n> \n> It looks good to me, but i have one question:\n> \n> +    printf(_(\"  -u, --oldest-transaction-id=XID  set oldest transaction\n> ID\\n\"));\n> \n> and\n> \n> +                   if (!TransactionIdIsNormal(set_oldest_xid))\n> +                   {\n> +                        pg_log_error(\"oldest transaction ID (-u) must be\n> greater or equal to %u\", FirstNormalTransactionId);\n> +                        exit(1);\n> +                   }\n> \n> I am wondering if we should not keep my original proposal \"oldest unfrozen\n> transaction\" (as compare to 
\"oldest transaction\") in both output to:\n> \n> - make the wording similar with what we can found in StartupXLOG():\n> \n>     ereport(DEBUG1,\n>             (errmsg_internal(\"oldest unfrozen transaction ID: %u, in\n> database %u\",\n>                              checkPoint.oldestXid,\n> checkPoint.oldestXidDB)));\n> \n> - give the new  \"-u\" a sense (somehow) from a naming point of view.\n> \n> What do you think?\n\nI was wondering about that too. We don't use the term \"unfrozen\" in the\npg_control output, and only in a few places in our docs. I added the\nword \"unfrozen\" for the -u doc description in this updated patch ---\nnot sure how much farther to go in using this term, but I am afraid if I\nuse it in the areas you suggested above, it will confuse people who are\ntrying to match it to the pg_control output.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.", "msg_date": "Thu, 8 Jul 2021 09:08:03 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "Also, the pg_upgrade status message still seems to be misplaced:\n\nIn 20210706190612.GM22043@telsasoft.com, Justin Pryzby wrote:\n> I re-arranged the pg_upgrade output of that patch: it was in the middle of the\n> two halves: \"Setting next transaction ID and epoch for new cluster\"\n\n+++ b/src/bin/pg_upgrade/pg_upgrade.c\n@@ -473,6 +473,12 @@ copy_xact_xlog_xid(void)\n \"\\\"%s/pg_resetwal\\\" -f -x %u \\\"%s\\\"\",\n new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,\n new_cluster.pgdata);\n+ check_ok();\n+ prep_status(\"Setting oldest XID for new cluster\");\n+ exec_prog(UTILITY_LOG_FILE, NULL, true, true,\n+ \"\\\"%s/pg_resetwal\\\" -f -u %u \\\"%s\\\"\",\n+ new_cluster.bindir, old_cluster.controldata.chkpnt_oldstxid,\n+ new_cluster.pgdata);\n exec_prog(UTILITY_LOG_FILE, NULL, true, true,\n 
\"\\\"%s/pg_resetwal\\\" -f -e %u \\\"%s\\\"\",\n new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch,\n\n-- \nJustin\n\n\n", "msg_date": "Thu, 8 Jul 2021 08:11:14 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Thu, Jul 8, 2021 at 08:11:14AM -0500, Justin Pryzby wrote:\n> Also, the pg_upgrade status message still seems to be misplaced:\n> \n> In 20210706190612.GM22043@telsasoft.com, Justin Pryzby wrote:\n> > I re-arranged the pg_upgrade output of that patch: it was in the middle of the\n> > two halves: \"Setting next transaction ID and epoch for new cluster\"\n> \n> +++ b/src/bin/pg_upgrade/pg_upgrade.c\n> @@ -473,6 +473,12 @@ copy_xact_xlog_xid(void)\n> \"\\\"%s/pg_resetwal\\\" -f -x %u \\\"%s\\\"\",\n> new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,\n> new_cluster.pgdata);\n> + check_ok();\n> + prep_status(\"Setting oldest XID for new cluster\");\n> + exec_prog(UTILITY_LOG_FILE, NULL, true, true,\n> + \"\\\"%s/pg_resetwal\\\" -f -u %u \\\"%s\\\"\",\n> + new_cluster.bindir, old_cluster.controldata.chkpnt_oldstxid,\n> + new_cluster.pgdata);\n> exec_prog(UTILITY_LOG_FILE, NULL, true, true,\n> \"\\\"%s/pg_resetwal\\\" -f -e %u \\\"%s\\\"\",\n> new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch,\n\nWow, you are 100% correct. Updated patch attached.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.", "msg_date": "Thu, 8 Jul 2021 09:51:47 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "\nOn 7/8/21 3:08 PM, Bruce Momjian wrote:\n> CAUTION: This email originated from outside of the organization. 
Do not click links or open attachments unless you can confirm the sender and know the content is safe.\n>\n>\n>\n> On Thu, Jul 8, 2021 at 07:35:58AM +0200, Drouvot, Bertrand wrote:\n>> Thanks for having looked at it.\n>>\n>> It looks good to me, but i have one question:\n>>\n>> + printf(_(\"  -u, --oldest-transaction-id=XID set oldest transaction\n>> ID\\n\"));\n>>\n>> and\n>>\n>> + if (!TransactionIdIsNormal(set_oldest_xid))\n>> + {\n>> + pg_log_error(\"oldest transaction ID (-u) must be\n>> greater or equal to %u\", FirstNormalTransactionId);\n>> + exit(1);\n>> + }\n>>\n>> I am wondering if we should not keep my original proposal \"oldest unfrozen\n>> transaction\" (as compare to \"oldest transaction\") in both output to:\n>>\n>> - make the wording similar with what we can found in StartupXLOG():\n>>\n>> ereport(DEBUG1,\n>> (errmsg_internal(\"oldest unfrozen transaction ID: %u, in\n>> database %u\",\n>> checkPoint.oldestXid,\n>> checkPoint.oldestXidDB)));\n>>\n>> - give the new \"-u\" a sense (somehow) from a naming point of view.\n>>\n>> What do you think?\n> I was wondering about that too. We don't use the term \"unfrozen\" in the\n> pg_control output, and only in a few places in our docs. 
I added the\n> word \"unfrozen\" for the -u doc description in this updated patch\nThanks!\n> ---\n> not sure how much farther to go in using this term, but I am afraid if I\n> use it in the areas you suggested above, it will confuse people who are\n> trying to match it to the pg_control output.\n\nMakes sense, thanks for your feedback.\n\nBertrand\n\n\n\n", "msg_date": "Thu, 8 Jul 2021 17:13:55 +0200", "msg_from": "\"Drouvot, Bertrand\" <bdrouvot@amazon.com>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Thu, Jul 8, 2021 at 09:51:47AM -0400, Bruce Momjian wrote:\n> On Thu, Jul 8, 2021 at 08:11:14AM -0500, Justin Pryzby wrote:\n> > Also, the pg_upgrade status message still seems to be misplaced:\n> > \n> > In 20210706190612.GM22043@telsasoft.com, Justin Pryzby wrote:\n> > > I re-arranged the pg_upgrade output of that patch: it was in the middle of the\n> > > two halves: \"Setting next transaction ID and epoch for new cluster\"\n> > \n> > +++ b/src/bin/pg_upgrade/pg_upgrade.c\n> > @@ -473,6 +473,12 @@ copy_xact_xlog_xid(void)\n> > \"\\\"%s/pg_resetwal\\\" -f -x %u \\\"%s\\\"\",\n> > new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid,\n> > new_cluster.pgdata);\n> > + check_ok();\n> > + prep_status(\"Setting oldest XID for new cluster\");\n> > + exec_prog(UTILITY_LOG_FILE, NULL, true, true,\n> > + \"\\\"%s/pg_resetwal\\\" -f -u %u \\\"%s\\\"\",\n> > + new_cluster.bindir, old_cluster.controldata.chkpnt_oldstxid,\n> > + new_cluster.pgdata);\n> > exec_prog(UTILITY_LOG_FILE, NULL, true, true,\n> > \"\\\"%s/pg_resetwal\\\" -f -e %u \\\"%s\\\"\",\n> > new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch,\n> \n> Wow, you are 100% correct. 
Updated patch attached.\n\nOK, I have the patch ready to apply to all supported Postgres versions,\nand it passes all my cross-version pg_upgrade tests.\n\nHowever, I am now stuck on the commit message text, and I think this is\nthe point Peter Geoghegan was trying to make earlier --- while we know\nthat preserving the oldest xid in pg_control is the right thing to do,\nand that setting it to the current xid - 2 billion (the old behavior)\ncauses vacuum freeze to run on all tables, but what else does this patch\naffect?\n\nAs far as I know, seeing a very low oldest xid causes autovacuum to\ncheck all objects and make sure their relfrozenxid is less then\nautovacuum_freeze_max_age, but isn't that just a check? Would that\ncause any table scans? I would think not. And would this cause\nincorrect truncation of pg_xact or fsm or vm files? I would think not\ntoo.\n\nEven if the old and new cluster had mismatched autovacuum_freeze_max_age\nvalues, I don't see how that would cause any corruption either.\n\nI could perhaps see corruption happening if pg_control's oldest xid\nvalue was closer to the current xid value than it should be, but I can't\nsee how having it 2-billion away could cause harm, unless perhaps\npg_upgrade itself used enough xids to cause the counter to wrap more\nthan 2^31 away from the oldest xid recorded in pg_control.\n\nWhat I am basically asking is how to document this and what it fixes.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Fri, 23 Jul 2021 20:08:52 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Fri, Jul 23, 2021 at 5:08 PM Bruce Momjian <bruce@momjian.us> wrote:\n> However, I am now stuck on the commit message text, and I think this is\n> the point Peter Geoghegan was trying to make earlier --- while we 
know\n> that preserving the oldest xid in pg_control is the right thing to do,\n> and that setting it to the current xid - 2 billion (the old behavior)\n> causes vacuum freeze to run on all tables, but what else does this patch\n> affect?\n\nAs far as I know the only other thing that it might affect is the\ntraditional use of pg_resetwal: recovering likely-corrupt data.\nGetting the database to limp along for long enough to pg_dump. That is\nthe only interpretation that makes sense, because the code in question\npredates pg_upgrade.\n\nAFAICT that was the original spirit of the code that we're changing here.\n\n> As far as I know, seeing a very low oldest xid causes autovacuum to\n> check all objects and make sure their relfrozenxid is less then\n> autovacuum_freeze_max_age, but isn't that just a check? Would that\n> cause any table scans? I would think not. And would this cause\n> incorrect truncation of pg_xact or fsm or vm files? I would think not\n> too.\n\nTom actually wrote this code. I believe that he questioned the whole\nbasis of it himself quite recently.\n\nWhether or not it's okay to change the behavior in contexts outside of\npg_upgrade (contexts where the user invokes pg_resetwal -x to get the\nsystem to start) is perhaps debatable. It probably doesn't matter very\nmuch if you preserve that behavior for non-pg_upgrade cases -- hard to\nsay. At the same time it's now easy to see that pg_upgrade shouldn't\nbe doing this.\n\n> Even if the old and new cluster had mismatched autovacuum_freeze_max_age\n> values, I don't see how that would cause any corruption either.\n\nSometimes the pg_control value for oldest XID is used as the oldest\nnon-frozen XID that's expected in the table. 
Other times it's\nrelfrozenxid itself IIRC.\n\n> I could perhaps see corruption happening if pg_control's oldest xid\n> value was closer to the current xid value than it should be, but I can't\n> see how having it 2-billion away could cause harm, unless perhaps\n> pg_upgrade itself used enough xids to cause the counter to wrap more\n> than 2^31 away from the oldest xid recorded in pg_control.\n>\n> What I am basically asking is how to document this and what it fixes.\n\nISTM that this is a little like commits 78db307bb2 and a61daa14. Maybe\ntake a look at those?\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Fri, 23 Jul 2021 17:47:18 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Fri, Jul 23, 2021 at 05:47:18PM -0700, Peter Geoghegan wrote:\n> > I could perhaps see corruption happening if pg_control's oldest xid\n> > value was closer to the current xid value than it should be, but I can't\n> > see how having it 2-billion away could cause harm, unless perhaps\n> > pg_upgrade itself used enough xids to cause the counter to wrap more\n> > than 2^31 away from the oldest xid recorded in pg_control.\n> >\n> > What I am basically asking is how to document this and what it fixes.\n> \n> ISTM that this is a little like commits 78db307bb2 and a61daa14. Maybe\n> take a look at those?\n\nAgreed. I just wanted to make sure I wasn't missing an important aspect\nof this patch. 
Thanks.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Fri, 23 Jul 2021 21:01:18 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Fri, Jul 23, 2021 at 09:01:18PM -0400, Bruce Momjian wrote:\n> On Fri, Jul 23, 2021 at 05:47:18PM -0700, Peter Geoghegan wrote:\n> > > I could perhaps see corruption happening if pg_control's oldest xid\n> > > value was closer to the current xid value than it should be, but I can't\n> > > see how having it 2-billion away could cause harm, unless perhaps\n> > > pg_upgrade itself used enough xids to cause the counter to wrap more\n> > > than 2^31 away from the oldest xid recorded in pg_control.\n> > >\n> > > What I am basically asking is how to document this and what it fixes.\n> > \n> > ISTM that this is a little like commits 78db307bb2 and a61daa14. Maybe\n> > take a look at those?\n> \n> Agreed. I just wanted to make sure I wasn't missing an important aspect\n> of this patch. Thanks.\n\nAnother question --- with the previous code, the oldest xid was always\nset to a reasonable value, -2 billion less than the current xid. With\nthe new code, the oldest xid might be slightly higher than the current\nxid if they use -x but not -u. Is that acceptable? I think we agreed it\nwas. 
pg_upgrade will always set both.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Sat, 24 Jul 2021 10:01:05 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" }, { "msg_contents": "On Sat, Jul 24, 2021 at 10:01:05AM -0400, Bruce Momjian wrote:\n> On Fri, Jul 23, 2021 at 09:01:18PM -0400, Bruce Momjian wrote:\n> > On Fri, Jul 23, 2021 at 05:47:18PM -0700, Peter Geoghegan wrote:\n> > > > I could perhaps see corruption happening if pg_control's oldest xid\n> > > > value was closer to the current xid value than it should be, but I can't\n> > > > see how having it 2-billion away could cause harm, unless perhaps\n> > > > pg_upgrade itself used enough xids to cause the counter to wrap more\n> > > > than 2^31 away from the oldest xid recorded in pg_control.\n> > > >\n> > > > What I am basically asking is how to document this and what it fixes.\n> > > \n> > > ISTM that this is a little like commits 78db307bb2 and a61daa14. Maybe\n> > > take a look at those?\n> > \n> > Agreed. I just wanted to make sure I wasn't missing an important aspect\n> > of this patch. Thanks.\n> \n> Another question --- with the previous code, the oldest xid was always\n> set to a reasonable value, -2 billion less than the current xid. With\n> the new code, the oldest xid might be slightly higher than the current\n> xid if they use -x but not -u. Is that acceptable? I think we agreed it\n> was. 
pg_upgrade will always set both.\n\nThis patch has been applied back to 9.6 and will appear in the next\nminor release.\n\n-- \n Bruce Momjian <bruce@momjian.us> https://momjian.us\n EDB https://enterprisedb.com\n\n If only the physical world exists, free will is an illusion.\n\n\n\n", "msg_date": "Mon, 26 Jul 2021 22:39:17 -0400", "msg_from": "Bruce Momjian <bruce@momjian.us>", "msg_from_op": false, "msg_subject": "Re: visibility map corruption" } ]
[ { "msg_contents": "Hi all\r\n\r\nThe return value of function PQsendFlushRequest is 1 or 0.\r\n-------------------------------------------------------------------\r\n<para>\r\n Sends a request for the server to flush its output buffer.\r\n<synopsis>\r\nint PQsendFlushRequest(PGconn *conn);\r\n</synopsis>\r\n</para>\r\n\r\n<para>\r\n Returns 1 for success. Returns 0 on any failure.\r\n</para>\r\n---------------------------------------------------------------------\r\nBut in the following code, false is returned.\r\nI think it would be better to change to 0.\r\n\r\nint PQsendFlushRequest(PGconn *conn)\r\n{\r\n......\r\n\tif (conn->asyncStatus != PGASYNC_IDLE &&\r\n\t\tconn->pipelineStatus == PQ_PIPELINE_OFF)\r\n\t{\r\n\t\tappendPQExpBufferStr(&conn->errorMessage,\r\n\t\t\t\t\t\t\t libpq_gettext(\"another command is already in progress\\n\"));\r\n\t\treturn false; ※\r\n\t}\r\n......\r\n}\r\n\r\nBest Regards!\r\nZhangjie", "msg_date": "Mon, 5 Jul 2021 01:34:58 +0000", "msg_from": "\"zhangjie2@fujitsu.com\" <zhangjie2@fujitsu.com>", "msg_from_op": true, "msg_subject": "[Patch] change the return value of PQsendFlushRequest" } ]
[ { "msg_contents": "Hi,\n\nThis is not a live bug.\nI think this is worth fixing, just for the sake of style and code\ncorrectness.\nAs a bonus, we have a reduced scope and standardized return.\n\nregards,\nRanier Vilela", "msg_date": "Mon, 5 Jul 2021 09:40:31 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Fix possible variable declaration uninitialized\n (src/backend/utils/adt/varlena.c)" } ]
[ { "msg_contents": "Hi\n\nI used the SetFileInformationByHandle function with the \nFILE_RENAME_FLAG_POSIX_SEMANTICS flag for the file rename function..\n\n1) The _WIN32_WINNT variable needs to be increased to 0x0A00 (Windows \n10).  Fixed conflict with #undef CHECKSUM_TYPE_NONE\n\n2) The SetFileInformationByHandle function works correctly only on \nWindows 10 and higher.\n\nThe app must have a manifest to check the Windows version using the \nIsWindows10OrGreater() function. I added a manifest to all postgres \nprojects and disabled the GenerateManifest option on windows projects.\n\nThis patch related to this post: \nhttps://www.postgresql.org/message-id/CAEepm%3D0FV-k%2B%3Dd9z08cW%3DZXoR1%3Dkw9wdpkP6WAuOrKJdz-8ujg%40mail.gmail.com\n\n\n-- \nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company", "msg_date": "Mon, 5 Jul 2021 16:53:06 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Atomic rename feature for Windows." }, { "msg_contents": "On Mon, Jul 05, 2021 at 04:53:06PM +0300, Victor Spirin wrote:\n> This patch related to this post:\n> https://www.postgresql.org/message-id/CAEepm%3D0FV-k%2B%3Dd9z08cW%3DZXoR1%3Dkw9wdpkP6WAuOrKJdz-8ujg%40mail.gmail.com\n\nHow does that cope with durable_rename_excl() where rename() is used\non Windows? The problems that 909b449 has somewhat \"fixed\" were\nannoying for the users as it prevented WAL segment recycling, so we\nneed to be sure that this does not cause more harm. \n\n> + /*\n> + * CHECKSUM_TYPE_NONE defined in the winioctl.h when _WIN32_WINNT >= _WIN32_WINNT_WIN10\n> + */\n> +#ifdef CHECKSUM_TYPE_NONE\n> +#undef CHECKSUM_TYPE_NONE\n> +#endif\n\nOkay. Should this be renamed separately then to avoid conflicts?\n\n> - * get support for GetLocaleInfoEx() with locales. 
For everything else\n> + * Studio 2015 the minimum requirement is Windows 10 (0x0A00) to get support for SetFileInformationByHandle.\n> + * The minimum requirement is Windows Vista (0x0600) get support for GetLocaleInfoEx() with locales.\n> + * For everything else\n> * the minimum version is Windows XP (0x0501).\n> */\n> #if defined(_MSC_VER) && _MSC_VER >= 1900\n> -#define MIN_WINNT 0x0600\n> +#define MIN_WINNT 0x0A00\n> #else\n> #define MIN_WINNT 0x0501\n> #endif\n\nThis is a large bump for Studio >= 2015 I am afraid. That does not\nseem acceptable, as it means losing support for GetLocaleInfoEx()\nacross older versions.\n\n> +#if defined(WIN32) && !defined(__CYGWIN__) && defined(_WIN32_WINNT_WIN10) && _WIN32_WINNT >= _WIN32_WINNT_WIN10\n> +\n> +#include <versionhelpers.h>\n> +\n> +/*\n> + * win10_rename - uses SetFileInformationByHandle function with FILE_RENAME_FLAG_POSIX_SEMANTICS flag for atomic rename file\n> + * working only on Windows 10 or later and _WIN32_WINNT must be >= _WIN32_WINNT_WIN10\n> + */\n> +static int win10_rename(wchar_t const* from, wchar_t const* to)\n\nHaving win10_rename(), a wrapper for pgrename_win10(), which is itself\nan extra wrapper for pgrename(), is confusing. Could you reduce the\nlayers of functions here. At the end we just want an extra runtime\noption for pgrename(). Note that pgrename_win10() could be static to\ndirmod.c, and it seems to me that you just want a small function to do\nthe path conversion anyway. It would be better to avoid using\nmalloc() in those code paths as well, as the backend could finish by\ncalling that. 
We should be able to remove the malloc()s with local\nvariables large enough to hold those paths, no?\n\n> +\t\t# manifest with ms_compatibility:supportedOS tags for using IsWindows10OrGreater() function\n> +\t\tprint $o \"\\n1 24 \\\"src/port/win10.manifest\\\"\\n\";\n> +\n> \t\tclose($o);\n> \t\tclose($i);\n> \t}\n> diff --git a/src/port/win10.manifest b/src/port/win10.manifest\n> new file mode 100644\n\nIt would be good to not require that. Those extra files make the\nlong-term maintenance harder.\n--\nMichael", "msg_date": "Tue, 6 Jul 2021 10:43:06 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Thanks!\n\nIn this version of the patch, calls to malloc have been removed. \nHopefully MAX_PATH is long enough for filenames.\n\n> How does that cope with durable_rename_excl() where rename() is used\n> on Windows?  The problems that 909b449 has somewhat \"fixed\" were\n> annoying for the users as it prevented WAL segment recycling, so we\n> need to be sure that this does not cause more harm.\n\nI tested this patch to resolve the error message \"could not rename \ntemporary statistics file \"pg_stat_tmp/pgstat.tmp\" to \n\"pg_stat_tmp/pgstat.stat\": Permission \u000bdenied\".  (I have a patch option \nto rename a temporary file for statistics only.)\n\n>> + /*\n>> + * CHECKSUM_TYPE_NONE defined in the winioctl.h when _WIN32_WINNT >= _WIN32_WINNT_WIN10\n>> + */\n>> +#ifdef CHECKSUM_TYPE_NONE\n>> +#undef CHECKSUM_TYPE_NONE\n>> +#endif\n> Okay. Should this be renamed separately then to avoid conflicts?\n>\nRenaming CHECKSUM_TYPE_NONE in the  checksum_helper.h is the best way to go.\n\n> #if defined(_MSC_VER) && _MSC_VER >= 1900\n> -#define MIN_WINNT 0x0600\n> +#define MIN_WINNT 0x0A00\n> #else\n> #define MIN_WINNT 0x0501\n> #endif\n> This is a large bump for Studio >= 2015 I am afraid. 
That does not\n> seem acceptable, as it means losing support for GetLocaleInfoEx()\n> across older versions.\n>\nIt seems that the MIN_WINNT value 0x0600 or 0x0A00 does not affect the \nuse of the GetLocaleInfoEx () function\n\n>> +\t\t# manifest with ms_compatibility:supportedOS tags for using IsWindows10OrGreater() function\n>> +\t\tprint $o \"\\n1 24 \\\"src/port/win10.manifest\\\"\\n\";\n>> +\n>> \t\tclose($o);\n>> \t\tclose($i);\n>> \t}\n>> diff --git a/src/port/win10.manifest b/src/port/win10.manifest\n>> new file mode 100644\n> It would be good to not require that. Those extra files make the\n> long-term maintenance harder.\nFunction IsWindows10OrGreater() working properly if there is manifest \nwith <ms_compatibility:supportedOS \nId=\"{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}\" />\n\n\"Applications not manifested for Windows 10 return false, even if the \ncurrent operating system version is Windows 10.\"\n\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n06.07.2021 4:43, Michael Paquier пишет:\n> On Mon, Jul 05, 2021 at 04:53:06PM +0300, Victor Spirin wrote:\n>> This patch related to this post:\n>> https://www.postgresql.org/message-id/CAEepm%3D0FV-k%2B%3Dd9z08cW%3DZXoR1%3Dkw9wdpkP6WAuOrKJdz-8ujg%40mail.gmail.com\n> How does that cope with durable_rename_excl() where rename() is used\n> on Windows? The problems that 909b449 has somewhat \"fixed\" were\n> annoying for the users as it prevented WAL segment recycling, so we\n> need to be sure that this does not cause more harm.\n>\n>> + /*\n>> + * CHECKSUM_TYPE_NONE defined in the winioctl.h when _WIN32_WINNT >= _WIN32_WINNT_WIN10\n>> + */\n>> +#ifdef CHECKSUM_TYPE_NONE\n>> +#undef CHECKSUM_TYPE_NONE\n>> +#endif\n> Okay. Should this be renamed separately then to avoid conflicts?\n>\n>> - * get support for GetLocaleInfoEx() with locales. 
For everything else\n>> + * Studio 2015 the minimum requirement is Windows 10 (0x0A00) to get support for SetFileInformationByHandle.\n>> + * The minimum requirement is Windows Vista (0x0600) get support for GetLocaleInfoEx() with locales.\n>> + * For everything else\n>> * the minimum version is Windows XP (0x0501).\n>> */\n>> #if defined(_MSC_VER) && _MSC_VER >= 1900\n>> -#define MIN_WINNT 0x0600\n>> +#define MIN_WINNT 0x0A00\n>> #else\n>> #define MIN_WINNT 0x0501\n>> #endif\n> This is a large bump for Studio >= 2015 I am afraid. That does not\n> seem acceptable, as it means losing support for GetLocaleInfoEx()\n> across older versions.\n>\n>> +#if defined(WIN32) && !defined(__CYGWIN__) && defined(_WIN32_WINNT_WIN10) && _WIN32_WINNT >= _WIN32_WINNT_WIN10\n>> +\n>> +#include <versionhelpers.h>\n>> +\n>> +/*\n>> + * win10_rename - uses SetFileInformationByHandle function with FILE_RENAME_FLAG_POSIX_SEMANTICS flag for atomic rename file\n>> + * working only on Windows 10 or later and _WIN32_WINNT must be >= _WIN32_WINNT_WIN10\n>> + */\n>> +static int win10_rename(wchar_t const* from, wchar_t const* to)\n> Having win10_rename(), a wrapper for pgrename_win10(), which is itself\n> an extra wrapper for pgrename(), is confusing. Could you reduce the\n> layers of functions here. At the end we just want an extra runtime\n> option for pgrename(). Note that pgrename_win10() could be static to\n> dirmod.c, and it seems to me that you just want a small function to do\n> the path conversion anyway. It would be better to avoid using\n> malloc() in those code paths as well, as the backend could finish by\n> calling that. 
We should be able to remove the malloc()s with local\n> variables large enough to hold those paths, no?\n>\n>> +\t\t# manifest with ms_compatibility:supportedOS tags for using IsWindows10OrGreater() function\n>> +\t\tprint $o \"\\n1 24 \\\"src/port/win10.manifest\\\"\\n\";\n>> +\n>> \t\tclose($o);\n>> \t\tclose($i);\n>> \t}\n>> diff --git a/src/port/win10.manifest b/src/port/win10.manifest\n>> new file mode 100644\n> It would be good to not require that. Those extra files make the\n> long-term maintenance harder.\n> --\n> Michael", "msg_date": "Thu, 8 Jul 2021 01:32:04 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Hello.\n\nI have changed the way I add the manifest to projects. I used the \nAdditionalManifestFiles option for a VS project.\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n08.07.2021 1:32, Victor Spirin пишет:\n> Thanks!\n>\n> In this version of the patch, calls to malloc have been removed. \n> Hopefully MAX_PATH is long enough for filenames.\n>\n>> How does that cope with durable_rename_excl() where rename() is used\n>> on Windows?  The problems that 909b449 has somewhat \"fixed\" were\n>> annoying for the users as it prevented WAL segment recycling, so we\n>> need to be sure that this does not cause more harm.\n>\n> I tested this patch to resolve the error message \"could not rename \n> temporary statistics file \"pg_stat_tmp/pgstat.tmp\" to \n> \"pg_stat_tmp/pgstat.stat\": Permission \u000bdenied\".  (I have a patch \n> option to rename a temporary file for statistics only.)\n>\n>>> + /*\n>>> + * CHECKSUM_TYPE_NONE defined in the winioctl.h when _WIN32_WINNT \n>>> >= _WIN32_WINNT_WIN10\n>>> + */\n>>> +#ifdef CHECKSUM_TYPE_NONE\n>>> +#undef CHECKSUM_TYPE_NONE\n>>> +#endif\n>> Okay.  
Should this be renamed separately then to avoid conflicts?\n>>\n> Renaming CHECKSUM_TYPE_NONE in the  checksum_helper.h is the best way \n> to go.\n>\n>>   #if defined(_MSC_VER) && _MSC_VER >= 1900\n>> -#define MIN_WINNT 0x0600\n>> +#define MIN_WINNT 0x0A00\n>>   #else\n>>   #define MIN_WINNT 0x0501\n>>   #endif\n>> This is a large bump for Studio >= 2015 I am afraid.  That does not\n>> seem acceptable, as it means losing support for GetLocaleInfoEx()\n>> across older versions.\n>>\n> It seems that the MIN_WINNT value 0x0600 or 0x0A00 does not affect the \n> use of the GetLocaleInfoEx () function\n>\n>>> +        # manifest with ms_compatibility:supportedOS tags for using \n>>> IsWindows10OrGreater() function\n>>> +        print $o \"\\n1 24 \\\"src/port/win10.manifest\\\"\\n\";\n>>> +\n>>>           close($o);\n>>>           close($i);\n>>>       }\n>>> diff --git a/src/port/win10.manifest b/src/port/win10.manifest\n>>> new file mode 100644\n>> It would be good to not require that.  Those extra files make the\n>> long-term maintenance harder.\n> Function IsWindows10OrGreater() working properly if there is manifest \n> with <ms_compatibility:supportedOS \n> Id=\"{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}\" />\n>\n> \"Applications not manifested for Windows 10 return false, even if the \n> current operating system version is Windows 10.\"\n>\n>\n> Victor Spirin\n> Postgres Professional:http://www.postgrespro.com\n> The Russian Postgres Company\n>\n> 06.07.2021 4:43, Michael Paquier пишет:\n>> On Mon, Jul 05, 2021 at 04:53:06PM +0300, Victor Spirin wrote:\n>>> This patch related to this post:\n>>> https://www.postgresql.org/message-id/CAEepm%3D0FV-k%2B%3Dd9z08cW%3DZXoR1%3Dkw9wdpkP6WAuOrKJdz-8ujg%40mail.gmail.com \n>>>\n>> How does that cope with durable_rename_excl() where rename() is used\n>> on Windows?  
The problems that 909b449 has somewhat \"fixed\" were\n>> annoying for the users as it prevented WAL segment recycling, so we\n>> need to be sure that this does not cause more harm.\n>>\n>>> + /*\n>>> + * CHECKSUM_TYPE_NONE defined in the winioctl.h when _WIN32_WINNT \n>>> >= _WIN32_WINNT_WIN10\n>>> + */\n>>> +#ifdef CHECKSUM_TYPE_NONE\n>>> +#undef CHECKSUM_TYPE_NONE\n>>> +#endif\n>> Okay.  Should this be renamed separately then to avoid conflicts?\n>>\n>>> - * get support for GetLocaleInfoEx() with locales. For everything else\n>>> + * Studio 2015 the minimum requirement is Windows 10 (0x0A00) to \n>>> get support for SetFileInformationByHandle.\n>>> + * The minimum requirement is Windows Vista (0x0600) get support \n>>> for GetLocaleInfoEx() with locales.\n>>> + * For everything else\n>>>    * the minimum version is Windows XP (0x0501).\n>>>    */\n>>>   #if defined(_MSC_VER) && _MSC_VER >= 1900\n>>> -#define MIN_WINNT 0x0600\n>>> +#define MIN_WINNT 0x0A00\n>>>   #else\n>>>   #define MIN_WINNT 0x0501\n>>>   #endif\n>> This is a large bump for Studio >= 2015 I am afraid.  That does not\n>> seem acceptable, as it means losing support for GetLocaleInfoEx()\n>> across older versions.\n>>\n>>> +#if defined(WIN32) && !defined(__CYGWIN__) && \n>>> defined(_WIN32_WINNT_WIN10) && _WIN32_WINNT >= _WIN32_WINNT_WIN10\n>>> +\n>>> +#include <versionhelpers.h>\n>>> +\n>>> +/*\n>>> + * win10_rename - uses SetFileInformationByHandle function with \n>>> FILE_RENAME_FLAG_POSIX_SEMANTICS flag for atomic rename file\n>>> + * working only on Windows 10 or later and  _WIN32_WINNT must be >= \n>>> _WIN32_WINNT_WIN10\n>>> + */\n>>> +static int win10_rename(wchar_t const* from, wchar_t const* to)\n>> Having win10_rename(), a wrapper for pgrename_win10(), which is itself\n>> an extra wrapper for pgrename(), is confusing.  Could you reduce the\n>> layers of functions here.  At the end we just want an extra runtime\n>> option for pgrename().  
Note that pgrename_win10() could be static to\n>> dirmod.c, and it seems to me that you just want a small function to do\n>> the path conversion anyway.  It would be better to avoid using\n>> malloc() in those code paths as well, as the backend could finish by\n>> calling that.  We should be able to remove the malloc()s with local\n>> variables large enough to hold those paths, no?\n>>\n>>> +        # manifest with ms_compatibility:supportedOS tags for using \n>>> IsWindows10OrGreater() function\n>>> +        print $o \"\\n1 24 \\\"src/port/win10.manifest\\\"\\n\";\n>>> +\n>>>           close($o);\n>>>           close($i);\n>>>       }\n>>> diff --git a/src/port/win10.manifest b/src/port/win10.manifest\n>>> new file mode 100644\n>> It would be good to not require that.  Those extra files make the\n>> long-term maintenance harder.\n>> -- \n>> Michael", "msg_date": "Mon, 6 Sep 2021 20:44:43 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Tue, Sep 7, 2021 at 5:44 AM Victor Spirin <v.spirin@postgrespro.ru> wrote:\n> I have changed the way I add the manifest to projects. I used the\n> AdditionalManifestFiles option for a VS project.\n\nHi Victor,\n\nThanks for working on this!\n\nI wonder if POSIX-style rename is used automatically on recent\nWindows, based on the new clue that DeleteFile() has started\ndefaulting to POSIX semantics[1] (maybe it would require ReplaceFile()\ninstead of MoveFileEx(), but I have no idea.) If so, one question is\nwhether we'd still want to do this explicit POSIX rename dance, or\nwhether we should just wait a bit longer for it to happen\nautomatically on all relevant systems (plus tweak to use ReplaceFile()\nif necessary). If not, we might want to do what you're proposing\nanyway, especially if ReplaceFile() is required, because its interface\nis weird (it only works if the other file exists?). 
Hmm, that alone\nwould be a good reason to go with your plan regardless, and of course\nit would be good to see this fixed everywhere ASAP.\n\nWe still have to answer that question for pgunlink(). I was\ncontemplating that over in that other thread, because unlink() ->\nEACCES is blocking something I'm working on. I found a partial\nsolution to that that works even on old and non-NTFS systems, and I\nwas thinking that would be enough for now and we could just patiently\nwait until automatic POSIX semantics to arrives on all relevant\nsystems as the real long term solution, so I didn't need to expend\nenergy doing an intermediate explicit POSIX-mode wrapper like what\nyou're proposing. But then it seems strange to make a different\nchoice about that for rename() and unlink(). So... do you think it\nwould make sense to extend your patch to cover unlink() too?\n\nIt would be great to have a tool in the tree that tests directory\nentry semantics, called something like src/bin/pg_test_dirmod, so that\nit becomes very clear when POSIX semantics are being used. It could\ntest various interesting unlink and rename scenarios through our\nwrappers (concurrent file handles, creating a new file with the name\nof the old one, unlinking the containing directory, ...). It could\nrun on the build farm animals, and we could even ask people to run it\nwhen they report problems, to try to lift the fog of bizarro Windows\nfile system semantics.\n\nHow exactly does the function fail on a file system that doesn't\nsupport the new POSIX semantics? Assuming there is something like\nENOSUPP to report \"file system says no\", do we want to keep trying\nevery time, or remember that it doesn't work? I guess the answer may\nvary per mount point, which makes it hard to track when you only have\nan fd...\n\nIf it fails because of a sharing violation, it seems strange that we\nimmediately fall back to the old code to do the traditional (and\nhorrible) sleep/retry loop. 
That means that in rare conditions we can\nstill get the old behaviour that leads to problems, just because of a\ntransient condition. Hmm. Would it make more sense to say: fall back\nto the traditional behaviour only for ENOSUPP (if there is such a\nthing), cope with transient sharing violations without giving up on\nPOSIX semantics, and report all other failures immediately?\n\nI agree that the existing enum CHECKSUM_TYPE_NONE + friends should be\nrenamed to something less collision-prone and more consistent with the\nname of the enum (\"pg_checksum_type\"), so I'd vote for adding a PG_\nprefix, in a separate patch.\n\n+ <Manifest>\n+ <AdditionalManifestFiles>src/port/win10.manifest</AdditionalManifestFiles>\n+ </Manifest>\n\nI have no opinion on how you're supposed to test for OS versions, but\none trivial observation is that that file declares support for many\nWindows releases, and I guess pretty soon you'll need to add 11, and\nthen we'll wonder why it says 10 in the file name. Would it be better\nas \"windows.manifest\" or something?\n\n+pgrename_win10(const char *from, const char *to)\n\nSame thought on the name: this'll age badly. 
What about something\nlike pgrename_windows_posix_semantics?\n\n+typedef struct _FILE_RENAME_INFO_VVS {\n+ union {\n+ BOOLEAN ReplaceIfExists; // FileRenameInfo\n+ DWORD Flags; // FileRenameInfoEx\n+ } DUMMYUNIONNAME;\n+ HANDLE RootDirectory;\n+ DWORD FileNameLength;\n+ WCHAR FileName[MAX_PATH];\n+} FILE_RENAME_INFO_VVS;\n\nWhy can't we use a system header[2] for this?\n\n+ if (MultiByteToWideChar(CP_ACP, 0, (LPCCH)from, -1,\n(LPWSTR)from_w, MAX_PATH) == 0) return -1;\n+ if (MultiByteToWideChar(CP_ACP, 0, (LPCCH)to, -1,\n(LPWSTR)rename_info.FileName, MAX_PATH) == 0) return -1;\n\nDon't these need _dosmaperr(GetLastError())?\n\n[1] https://www.postgresql.org/message-id/20210905214437.y25j42yigwnbdvtg%40alap3.anarazel.de\n[2] https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_rename_info\n\n\n", "msg_date": "Tue, 7 Sep 2021 13:40:51 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." 
}, { "msg_contents": "Thank you,\n\nIn this variant:\n\n1) renamed file win10.manifest to windows.manifest\n\n2) renamed function pgrename_win10 to pgrename_windows_posix_semantics\n\n3) Function pgrename returns result of pgrename_windows_posix_semantics \nfunction and not contiue run old version of function.\n\n4) Added call GetLastError() after error MultiByteToWideChar fuction.\n\n> +typedef struct _FILE_RENAME_INFO_VVS {\n> + union {\n> + BOOLEAN ReplaceIfExists; // FileRenameInfo\n> + DWORD Flags; // FileRenameInfoEx\n> + } DUMMYUNIONNAME;\n> + HANDLE RootDirectory;\n> + DWORD FileNameLength;\n> + WCHAR FileName[MAX_PATH];\n> +} FILE_RENAME_INFO_VVS;\n>\n> Why can't we use a system header[2] for this?\nI have a dynamic memory allocation version in the first patch.\n\n     len = wcslen(to_w);\n     rename_info = (FILE_RENAME_INFO*)malloc(sizeof(FILE_RENAME_INFO) + \n(len + 1) * sizeof(wchar_t));\n\n     rename_info->ReplaceIfExists = TRUE;\n     rename_info->Flags = FILE_RENAME_FLAG_POSIX_SEMANTICS | \nFILE_RENAME_FLAG_REPLACE_IF_EXISTS;\n\n     rename_info->RootDirectory = NULL;\n     rename_info->FileNameLength = len;\n     memcpy(rename_info->FileName, to_w, (len + 1) * sizeof(wchar_t));\n\nIs this code better? Maybe there is another correct method?\n\nI checked the pgrename_windows_posix_semantics() function on Windows 7. \nIt returns error 87: Parameter is incorrect. Hence, it is necessary to \ncheck the Windows version and call the old pgrename function for old \nWindows.\n\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n07.09.2021 4:40, Thomas Munro пишет:\n> On Tue, Sep 7, 2021 at 5:44 AM Victor Spirin <v.spirin@postgrespro.ru> wrote:\n>> I have changed the way I add the manifest to projects. 
I used the\n>> AdditionalManifestFiles option for a VS project.\n> Hi Victor,\n>\n> Thanks for working on this!\n>\n> I wonder if POSIX-style rename is used automatically on recent\n> Windows, based on the new clue that DeleteFile() has started\n> defaulting to POSIX semantics[1] (maybe it would require ReplaceFile()\n> instead of MoveFileEx(), but I have no idea.) If so, one question is\n> whether we'd still want to do this explicit POSIX rename dance, or\n> whether we should just wait a bit longer for it to happen\n> automatically on all relevant systems (plus tweak to use ReplaceFile()\n> if necessary). If not, we might want to do what you're proposing\n> anyway, especially if ReplaceFile() is required, because its interface\n> is weird (it only works if the other file exists?). Hmm, that alone\n> would be a good reason to go with your plan regardless, and of course\n> it would be good to see this fixed everywhere ASAP.\n>\n> We still have to answer that question for pgunlink(). I was\n> contemplating that over in that other thread, because unlink() ->\n> EACCES is blocking something I'm working on. I found a partial\n> solution to that that works even on old and non-NTFS systems, and I\n> was thinking that would be enough for now and we could just patiently\n> wait until automatic POSIX semantics to arrives on all relevant\n> systems as the real long term solution, so I didn't need to expend\n> energy doing an intermediate explicit POSIX-mode wrapper like what\n> you're proposing. But then it seems strange to make a different\n> choice about that for rename() and unlink(). So... do you think it\n> would make sense to extend your patch to cover unlink() too?\n>\n> It would be great to have a tool in the tree that tests directory\n> entry semantics, called something like src/bin/pg_test_dirmod, so that\n> it becomes very clear when POSIX semantics are being used. 
It could\n> test various interesting unlink and rename scenarios through our\n> wrappers (concurrent file handles, creating a new file with the name\n> of the old one, unlinking the containing directory, ...). It could\n> run on the build farm animals, and we could even ask people to run it\n> when they report problems, to try to lift the fog of bizarro Windows\n> file system semantics.\n>\n> How exactly does the function fail on a file system that doesn't\n> support the new POSIX semantics? Assuming there is something like\n> ENOSUPP to report \"file system says no\", do we want to keep trying\n> every time, or remember that it doesn't work? I guess the answer may\n> vary per mount point, which makes it hard to track when you only have\n> an fd...\n>\n> If it fails because of a sharing violation, it seems strange that we\n> immediately fall back to the old code to do the traditional (and\n> horrible) sleep/retry loop. That means that in rare conditions we can\n> still get the old behaviour that leads to problems, just because of a\n> transient condition. Hmm. Would it make more sense to say: fall back\n> to the traditional behaviour only for ENOSUPP (if there is such a\n> thing), cope with transient sharing violations without giving up on\n> POSIX semantics, and report all other failures immediately?\n>\n> I agree that the existing enum CHECKSUM_TYPE_NONE + friends should be\n> renamed to something less collision-prone and more consistent with the\n> name of the enum (\"pg_checksum_type\"), so I'd vote for adding a PG_\n> prefix, in a separate patch.\n>\n> + <Manifest>\n> + <AdditionalManifestFiles>src/port/win10.manifest</AdditionalManifestFiles>\n> + </Manifest>\n>\n> I have no opinion on how you're supposed to test for OS versions, but\n> one trivial observation is that that file declares support for many\n> Windows releases, and I guess pretty soon you'll need to add 11, and\n> then we'll wonder why it says 10 in the file name. 
Would it be better\n> as \"windows.manifest\" or something?\n>\n> +pgrename_win10(const char *from, const char *to)\n>\n> Same thought on the name: this'll age badly. What about something\n> like pgrename_windows_posix_semantics?\n>\n> +typedef struct _FILE_RENAME_INFO_VVS {\n> + union {\n> + BOOLEAN ReplaceIfExists; // FileRenameInfo\n> + DWORD Flags; // FileRenameInfoEx\n> + } DUMMYUNIONNAME;\n> + HANDLE RootDirectory;\n> + DWORD FileNameLength;\n> + WCHAR FileName[MAX_PATH];\n> +} FILE_RENAME_INFO_VVS;\n>\n> Why can't we use a system header[2] for this?\n>\n> + if (MultiByteToWideChar(CP_ACP, 0, (LPCCH)from, -1,\n> (LPWSTR)from_w, MAX_PATH) == 0) return -1;\n> + if (MultiByteToWideChar(CP_ACP, 0, (LPCCH)to, -1,\n> (LPWSTR)rename_info.FileName, MAX_PATH) == 0) return -1;\n>\n> Don't these need _dosmaperr(GetLastError())?\n>\n> [1] https://www.postgresql.org/message-id/20210905214437.y25j42yigwnbdvtg%40alap3.anarazel.de\n> [2] https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_rename_info\n>\n>", "msg_date": "Wed, 8 Sep 2021 00:40:11 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Thu, Jul 8, 2021 at 12:32 AM Victor Spirin <v.spirin@postgrespro.ru>\nwrote:\n\n>\n> > #if defined(_MSC_VER) && _MSC_VER >= 1900\n> > -#define MIN_WINNT 0x0600\n> > +#define MIN_WINNT 0x0A00\n> > #else\n> > #define MIN_WINNT 0x0501\n> > #endif\n> > This is a large bump for Studio >= 2015 I am afraid. That does not\n> > seem acceptable, as it means losing support for GetLocaleInfoEx()\n> > across older versions.\n> >\n> It seems that the MIN_WINNT value 0x0600 or 0x0A00 does not affect the\n> use of the GetLocaleInfoEx () function\n>\n> Anything below Windows Server 2012 (_WIN32_WINNT = 0x0602) is no longer\nsupported. 
A patch with a bump on MIN_WINNT might be due.\n\nRegards,\n\nJuan José Santamaría Flecha\n\nOn Thu, Jul 8, 2021 at 12:32 AM Victor Spirin <v.spirin@postgrespro.ru> wrote:\n>   #if defined(_MSC_VER) && _MSC_VER >= 1900\n> -#define MIN_WINNT 0x0600\n> +#define MIN_WINNT 0x0A00\n>   #else\n>   #define MIN_WINNT 0x0501\n>   #endif\n> This is a large bump for Studio >= 2015 I am afraid.  That does not\n> seem acceptable, as it means losing support for GetLocaleInfoEx()\n> across older versions.\n>\nIt seems that the MIN_WINNT value 0x0600 or 0x0A00 does not affect the \nuse of the GetLocaleInfoEx () function Anything below Windows Server 2012 (_WIN32_WINNT = 0x0602) is no longer supported. A patch with a bump on MIN_WINNT might be due.Regards,Juan José Santamaría Flecha", "msg_date": "Wed, 8 Sep 2021 12:06:20 +0200", "msg_from": "=?UTF-8?Q?Juan_Jos=C3=A9_Santamar=C3=ADa_Flecha?=\n <juanjo.santamaria@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Tue, Sep 7, 2021 at 11:40 PM Victor Spirin <v.spirin@postgrespro.ru>\nwrote:\n\n>\n> I checked the pgrename_windows_posix_semantics() function on Windows 7.\n> It returns error 87: Parameter is incorrect. Hence, it is necessary to\n> check the Windows version and call the old pgrename function for old\n> Windows.\n>\n> The FILE_RENAME_FLAGs are available starting from Windows 10 Release id\n1607, NTDDI_WIN10_RS1. The check should be using something like\nIsWindowsVersionOrGreater(10, 0, 1607). 
Or you could test this\nusing RtlGetVersion(), loading it from ntdll infrastructure coming from\nstat() patch [1], which doesn't need a manifest.\n\n[1]\nhttps://www.postgresql.org/message-id/flat/CA%2BhUKG%2BoLqfBVJ_j3C03QgoshrX1KxYq0LB1vJV0OXPOcZZfhA%40mail.gmail.com#bfcc256e4eda369e369275f5b4e38185\n\nRegards,\n\nJuan José Santamaría Flecha\n\nOn Tue, Sep 7, 2021 at 11:40 PM Victor Spirin <v.spirin@postgrespro.ru> wrote:\nI checked the pgrename_windows_posix_semantics() function on Windows 7. \nIt returns error 87: Parameter is incorrect. Hence, it is necessary to \ncheck the Windows version and call the old pgrename function for old \nWindows.The FILE_RENAME_FLAGs are available starting from Windows 10 Release id 1607, NTDDI_WIN10_RS1. The check should be using something like IsWindowsVersionOrGreater(10, 0, 1607). Or you could test this using RtlGetVersion(), loading it from ntdll infrastructure coming from stat() patch [1], which doesn't need a manifest.[1] https://www.postgresql.org/message-id/flat/CA%2BhUKG%2BoLqfBVJ_j3C03QgoshrX1KxYq0LB1vJV0OXPOcZZfhA%40mail.gmail.com#bfcc256e4eda369e369275f5b4e38185Regards,Juan José Santamaría Flecha", "msg_date": "Thu, 23 Sep 2021 13:18:21 +0200", "msg_from": "=?UTF-8?Q?Juan_Jos=C3=A9_Santamar=C3=ADa_Flecha?=\n <juanjo.santamaria@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Wed, Sep 8, 2021 at 9:40 AM Victor Spirin <v.spirin@postgrespro.ru> wrote:\n> Is this code better? Maybe there is another correct method?\n\nHmm, if we want to use the system header's struct definition, add some\nspace for a path at the end, and avoid heap allocation, perhaps we\ncould do something like:\n\nstruct {\n FILE_RENAME_INFO fri;\n WCHAR extra_space[MAX_PATH];\n} x;\n\n\n", "msg_date": "Thu, 23 Sep 2021 23:46:53 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." 
}, { "msg_contents": "On Wed, Sep 8, 2021 at 10:13 PM Juan José Santamaría Flecha\n<juanjo.santamaria@gmail.com> wrote:\n> On Thu, Jul 8, 2021 at 12:32 AM Victor Spirin <v.spirin@postgrespro.ru> wrote:\n>> > #if defined(_MSC_VER) && _MSC_VER >= 1900\n>> > -#define MIN_WINNT 0x0600\n>> > +#define MIN_WINNT 0x0A00\n>> > #else\n>> > #define MIN_WINNT 0x0501\n>> > #endif\n>> > This is a large bump for Studio >= 2015 I am afraid. That does not\n>> > seem acceptable, as it means losing support for GetLocaleInfoEx()\n>> > across older versions.\n>> >\n>> It seems that the MIN_WINNT value 0x0600 or 0x0A00 does not affect the\n>> use of the GetLocaleInfoEx () function\n>>\n> Anything below Windows Server 2012 (_WIN32_WINNT = 0x0602) is no longer supported. A patch with a bump on MIN_WINNT might be due.\n\n+1\n\n\n", "msg_date": "Fri, 24 Sep 2021 15:32:25 +1200", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Thank you,\n\nFixed FILE_RENAME_INFO structure\n\nI prepared 2 versions of the patch:\n\n1) with manifest and IsWindows10OrGreater() function\n2) without manifest and RtlGetVersion function from ntdll.dll\n\nWhat's better?\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n23.09.2021 14:46, Thomas Munro пишет:\n> On Wed, Sep 8, 2021 at 9:40 AM Victor Spirin <v.spirin@postgrespro.ru> wrote:\n>> Is this code better? Maybe there is another correct method?\n> Hmm, if we want to use the system header's struct definition, add some\n> space for a path at the end, and avoid heap allocation, perhaps we\n> could do something like:\n>\n> struct {\n> FILE_RENAME_INFO fri;\n> WCHAR extra_space[MAX_PATH];\n> } x;\n>\n>", "msg_date": "Thu, 30 Sep 2021 23:49:22 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." 
}, { "msg_contents": "Thanks.\n\nIsWindowsVersionOrGreater(10,0,1607) always returns false\n\nOnly IsWindowsVersionOrGreater(10,0,0) is a valid call. (There are no \nservice packs in Windows 10.)\n\nI haven't found a way to determine the Windows 10 release ID.\nThe RtlGetVersion function returns dwBuildNumber = 19042 on my Windows.\n\nI heard that Microsoft does not support older versions of Windows 10 and \nrequires a mandatory update.\n\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n23.09.2021 14:18, Juan José Santamaría Flecha пишет:\n>\n> On Tue, Sep 7, 2021 at 11:40 PM Victor Spirin <v.spirin@postgrespro.ru \n> <mailto:v.spirin@postgrespro.ru>> wrote:\n>\n>\n> I checked the pgrename_windows_posix_semantics() function on\n> Windows 7.\n> It returns error 87: Parameter is incorrect. Hence, it is\n> necessary to\n> check the Windows version and call the old pgrename function for old\n> Windows.\n>\n> The FILE_RENAME_FLAGs are available starting from Windows 10 Release \n> id 1607, NTDDI_WIN10_RS1. The check should be using something like \n> IsWindowsVersionOrGreater(10, 0, 1607). Or you could test this \n> using RtlGetVersion(), loading it from ntdll infrastructure coming \n> from stat() patch [1], which doesn't need a manifest.\n>\n> [1] \n> https://www.postgresql.org/message-id/flat/CA%2BhUKG%2BoLqfBVJ_j3C03QgoshrX1KxYq0LB1vJV0OXPOcZZfhA%40mail.gmail.com#bfcc256e4eda369e369275f5b4e38185 \n> <https://www.postgresql.org/message-id/flat/CA%2BhUKG%2BoLqfBVJ_j3C03QgoshrX1KxYq0LB1vJV0OXPOcZZfhA%40mail.gmail.com#bfcc256e4eda369e369275f5b4e38185>\n>\n> Regards,\n>\n> Juan José Santamaría Flecha\n\n\n\n\n\n\n Thanks.\n\n IsWindowsVersionOrGreater(10,0,1607) always returns false\n\n Only IsWindowsVersionOrGreater(10,0,0) is a valid call. 
(There are\n no service packs in Windows 10.)\n\n I haven't found a way to determine the Windows 10 release ID.\n The RtlGetVersion function returns dwBuildNumber = 19042 on my\n Windows.\nI heard that Microsoft does not support older versions of Windows\n 10 and requires a mandatory update.\n\n\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n\n23.09.2021 14:18, Juan José Santamaría\n Flecha пишет:\n\n\n\n\n\n\n\nOn Tue, Sep 7, 2021 at 11:40\n PM Victor Spirin <v.spirin@postgrespro.ru>\n wrote:\n\n\n I checked the pgrename_windows_posix_semantics() function on\n Windows 7. \n It returns error 87: Parameter is incorrect. Hence, it is\n necessary to \n check the Windows version and call the old pgrename function\n for old \n Windows.\n\n\nThe FILE_RENAME_FLAGs are available starting from Windows\n 10 Release id 1607, NTDDI_WIN10_RS1. The check should be\n using something like IsWindowsVersionOrGreater(10, 0, 1607).\n Or you could test this using RtlGetVersion(), loading it\n from ntdll infrastructure coming from stat() patch [1],\n which doesn't need a manifest.\n\n\n[1] https://www.postgresql.org/message-id/flat/CA%2BhUKG%2BoLqfBVJ_j3C03QgoshrX1KxYq0LB1vJV0OXPOcZZfhA%40mail.gmail.com#bfcc256e4eda369e369275f5b4e38185\n\n\nRegards,\n\n\nJuan José Santamaría Flecha", "msg_date": "Fri, 1 Oct 2021 00:00:56 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Thu, Sep 30, 2021 at 11:00 PM Victor Spirin <v.spirin@postgrespro.ru>\nwrote:\n\n>\n> IsWindowsVersionOrGreater(10,0,1607) always returns false\n>\n> Only IsWindowsVersionOrGreater(10,0,0) is a valid call. 
(There are no\n> service packs in Windows 10.)\n>\n> I haven't found a way to determine the Windows 10 release ID.\n> The RtlGetVersion function returns dwBuildNumber = 19042 on my Windows.\n>\n> I heard that Microsoft does not support older versions of Windows 10 and\n> requires a mandatory update.\n>\nYou can translate the BuildNumber to the ReleaseId, for 1607 it will be 14393\n[1].\n\nWe might find pretty much anything in the wild, the safer the check the\nbetter.\n\n[1] https://en.wikipedia.org/wiki/Windows_10_version_history\n\nRegards,\n\nJuan José Santamaría Flecha\n\nOn Thu, Sep 30, 2021 at 11:00 PM Victor Spirin <v.spirin@postgrespro.ru> wrote:\n\n IsWindowsVersionOrGreater(10,0,1607) always returns false\n\n Only IsWindowsVersionOrGreater(10,0,0) is a valid call. (There are\n no service packs in Windows 10.)\n\n I haven't found a way to determine the Windows 10 release ID.\n The RtlGetVersion function returns dwBuildNumber = 19042 on my\n Windows.\nI heard that Microsoft does not support older versions of Windows\n 10 and requires a mandatory update.You can translate the BuildNumber to the ReleaseId, for 1607 it will be 14393 [1].We might find pretty much anything in the wild, the safer the check the better.[1] https://en.wikipedia.org/wiki/Windows_10_version_historyRegards,Juan José Santamaría Flecha", "msg_date": "Fri, 1 Oct 2021 14:37:53 +0200", "msg_from": "=?UTF-8?Q?Juan_Jos=C3=A9_Santamar=C3=ADa_Flecha?=\n <juanjo.santamaria@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Thank you\n\nThank you\nIn this version of patch:\n\n1. Made function isWindows1607OrGreater() without manifest\n\n2. To open a directory using CreateFile, have to specify the \nFILE_FLAG_BACKUP_SEMANTICS flag as part of dwFlagsAndAttributes. 
Checks \nthat file is a directory by the GetFileAttributes function.\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n01.10.2021 15:37, Juan José Santamaría Flecha пишет:\n>\n> On Thu, Sep 30, 2021 at 11:00 PM Victor Spirin \n> <v.spirin@postgrespro.ru <mailto:v.spirin@postgrespro.ru>> wrote:\n>\n>\n> IsWindowsVersionOrGreater(10,0,1607) always returns false\n>\n> Only IsWindowsVersionOrGreater(10,0,0) is a valid call. (There are\n> no service packs in Windows 10.)\n>\n> I haven't found a way to determine the Windows 10 release ID.\n> The RtlGetVersion function returns dwBuildNumber = 19042 on my\n> Windows.\n>\n> I heard that Microsoft does not support older versions of Windows\n> 10 and requires a mandatory update.\n>\n> You can translate the BuildNumber to the ReleaseId, for 1607 it will \n> be 14393 [1].\n>\n> We might find pretty much anything in the wild, the safer the check \n> the better.\n>\n> [1] https://en.wikipedia.org/wiki/Windows_10_version_history \n> <https://en.wikipedia.org/wiki/Windows_10_version_history>\n>\n> Regards,\n>\n> Juan José Santamaría Flecha", "msg_date": "Mon, 4 Oct 2021 22:51:09 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Hi\n\nAdded a small fix for calling the GetFileAttributes function\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n05.07.2021 16:53, Victor Spirin пишет:\n> Hi\n>\n> I used the SetFileInformationByHandle function with the \n> FILE_RENAME_FLAG_POSIX_SEMANTICS flag for the file rename function..\n>\n> 1) The _WIN32_WINNT variable needs to be increased to 0x0A00 (Windows \n> 10).  
Fixed conflict with #undef CHECKSUM_TYPE_NONE\n>\n> 2) The SetFileInformationByHandle function works correctly only on \n> Windows 10 and higher.\n>\n> The app must have a manifest to check the Windows version using the \n> IsWindows10OrGreater() function. I added a manifest to all postgres \n> projects and disabled the GenerateManifest option on windows projects.\n>\n> This patch related to this post: \n> https://www.postgresql.org/message-id/CAEepm%3D0FV-k%2B%3Dd9z08cW%3DZXoR1%3Dkw9wdpkP6WAuOrKJdz-8ujg%40mail.gmail.com\n>\n>", "msg_date": "Wed, 3 Nov 2021 10:51:01 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Hi\n\nAdded the pgunlink_windows_posix_semantics function and modified the \npgunlink function\n\nI used FILE_DISPOSITION_POSIX_SEMANTICS flag for unlink files on Windows \n10 (1607) and above.\n\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n05.07.2021 16:53, Victor Spirin пишет:\n> Hi\n>\n> I used the SetFileInformationByHandle function with the \n> FILE_RENAME_FLAG_POSIX_SEMANTICS flag for the file rename function..\n>\n> 1) The _WIN32_WINNT variable needs to be increased to 0x0A00 (Windows \n> 10).  Fixed conflict with #undef CHECKSUM_TYPE_NONE\n>\n> 2) The SetFileInformationByHandle function works correctly only on \n> Windows 10 and higher.\n>\n> The app must have a manifest to check the Windows version using the \n> IsWindows10OrGreater() function. 
I added a manifest to all postgres \n> projects and disabled the GenerateManifest option on windows projects.\n>\n> This patch related to this post: \n> https://www.postgresql.org/message-id/CAEepm%3D0FV-k%2B%3Dd9z08cW%3DZXoR1%3Dkw9wdpkP6WAuOrKJdz-8ujg%40mail.gmail.com\n>\n>", "msg_date": "Sun, 14 Nov 2021 22:53:01 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Hi\n\nThe flags for calling the CreateFile function have been changed.\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n05.07.2021 16:53, Victor Spirin пишет:\n> Hi\n>\n> I used the SetFileInformationByHandle function with the \n> FILE_RENAME_FLAG_POSIX_SEMANTICS flag for the file rename function..\n>\n> 1) The _WIN32_WINNT variable needs to be increased to 0x0A00 (Windows \n> 10).  Fixed conflict with #undef CHECKSUM_TYPE_NONE\n>\n> 2) The SetFileInformationByHandle function works correctly only on \n> Windows 10 and higher.\n>\n> The app must have a manifest to check the Windows version using the \n> IsWindows10OrGreater() function. I added a manifest to all postgres \n> projects and disabled the GenerateManifest option on windows projects.\n>\n> This patch related to this post: \n> https://www.postgresql.org/message-id/CAEepm%3D0FV-k%2B%3Dd9z08cW%3DZXoR1%3Dkw9wdpkP6WAuOrKJdz-8ujg%40mail.gmail.com\n>\n>", "msg_date": "Tue, 30 Nov 2021 19:56:09 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Tue, Jul 6, 2021 at 1:43 PM Michael Paquier <michael@paquier.xyz> wrote:\n> This is a large bump for Studio >= 2015 I am afraid. 
That does not\n> seem acceptable, as it means losing support for GetLocaleInfoEx()\n> across older versions.\n\nPlaying the devil's advocate here: why shouldn't we routinely drop\nsupport for anything that'll be EOL'd when a given PostgreSQL major\nrelease ships? The current policy seems somewhat extreme in the other\ndirection: our target OS baseline is a contemporary of RHEL 2 or 3 and\nLinux 2.4.x, and our minimum compiler is a contemporary of GCC 3.x.\n\nSomething EOL'd over a year ago that has a bunch of features we've\nreally always wanted, like Unix domain sockets and Unix link\nsemantics, seems like a reasonable choice to me... hypothetical users\nwho refuse to upgrade or buy the extreme long life support options\njust can't upgrade to PostgreSQL 15 until they upgrade their OS.\nWhat's wrong with that? I don't think PostgreSQL 15 should support\nFreeBSD 6, RHEL 4 or AT&T Unix Release 1 either.\n\n\n", "msg_date": "Fri, 10 Dec 2021 17:23:09 +1300", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Fri, Dec 10, 2021 at 5:23 PM Thomas Munro <thomas.munro@gmail.com> wrote:\n> Playing the devil's advocate here: why shouldn't we routinely drop\n> support for anything that'll be EOL'd when a given PostgreSQL major\n> release ships? The current policy seems somewhat extreme in the other\n> direction: our target OS baseline is a contemporary of RHEL 2 or 3 and\n> Linux 2.4.x, and our minimum compiler is a contemporary of GCC 3.x.\n\nOops, I take those contemporaries back, I was looking at older\ndocumentation... but still the general point stands, can't we be a\nlittle more aggressive?\n\n\n", "msg_date": "Fri, 10 Dec 2021 17:28:04 +1300", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." 
}, { "msg_contents": "Thomas Munro <thomas.munro@gmail.com> writes:\n> Playing the devil's advocate here: why shouldn't we routinely drop\n> support for anything that'll be EOL'd when a given PostgreSQL major\n> release ships?\n\nI don't like the word \"routinely\" here. Your next bit is a better\nargument:\n\n> Something EOL'd over a year ago that has a bunch of features we've\n> really always wanted, like Unix domain sockets and Unix link\n> semantics, seems like a reasonable choice to me...\n\nMy general approach to platform compatibility is that when we\nbreak compatibility with old versions of something, we should do so\nbecause it will bring concrete benefits. If we can plausibly\ndrop support for Windows versions that don't have POSIX rename\nsemantics, I'm 100% for that. I'm not for dropping support for\nsome platform just because it's old.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Thu, 09 Dec 2021 23:33:17 -0500", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Thu, Dec 09, 2021 at 11:33:17PM -0500, Tom Lane wrote:\n> My general approach to platform compatibility is that when we\n> break compatibility with old versions of something, we should do so\n> because it will bring concrete benefits. If we can plausibly\n> drop support for Windows versions that don't have POSIX rename\n> semantics, I'm 100% for that. I'm not for dropping support for\n> some platform just because it's old.\n\nI'd agree with that. Now, I would also say if we need something that\ndepends on a newer version of _WIN32_WINNT that proves to be trickier\nor even not possible for older versions, there could be an argument\nfor dropping older versions, even in the back-branches, if the problem\nto-be-fixed is bad enough. 
In short history, we've never had to go\ndown to that AFAIK, though.\n--\nMichael", "msg_date": "Mon, 13 Dec 2021 09:18:18 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Hi\n\nUpdated patch: we use the posix semantic features in Windows build 17763 \nand up.\nWe found an issue with this feature on Windows Server 2016 without \nupdates (Windows 1607 Build 14393)\n\nVictor Spirin\nPostgres Professional:http://www.postgrespro.com\nThe Russian Postgres Company\n\n05.07.2021 16:53, Victor Spirin пишет:\n> Hi\n>\n> I used the SetFileInformationByHandle function with the \n> FILE_RENAME_FLAG_POSIX_SEMANTICS flag for the file rename function..\n>\n> 1) The _WIN32_WINNT variable needs to be increased to 0x0A00 (Windows \n> 10).  Fixed conflict with #undef CHECKSUM_TYPE_NONE\n>\n> 2) The SetFileInformationByHandle function works correctly only on \n> Windows 10 and higher.\n>\n> The app must have a manifest to check the Windows version using the \n> IsWindows10OrGreater() function. I added a manifest to all postgres \n> projects and disabled the GenerateManifest option on windows projects.\n>\n> This patch related to this post: \n> https://www.postgresql.org/message-id/CAEepm%3D0FV-k%2B%3Dd9z08cW%3DZXoR1%3Dkw9wdpkP6WAuOrKJdz-8ujg%40mail.gmail.com\n>\n>", "msg_date": "Wed, 6 Apr 2022 01:39:59 +0300", "msg_from": "Victor Spirin <v.spirin@postgrespro.ru>", "msg_from_op": true, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Thu, 9 Dec 2021 at 23:36, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> I'm not for dropping support for some platform just because it's old.\n\nI guess I'll have to spin up the Vax again :)\n\n\n", "msg_date": "Fri, 8 Apr 2022 10:12:04 -0400", "msg_from": "Greg Stark <stark@mit.edu>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." 
}, { "msg_contents": "On Fri, Apr 8, 2022 at 10:12 AM Greg Stark <stark@mit.edu> wrote:\n> On Thu, 9 Dec 2021 at 23:36, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > I'm not for dropping support for some platform just because it's old.\n>\n> I guess I'll have to spin up the Vax again :)\n\nThis is a pretty good summary of what's wrong with our current\ndeprecation policy. Like Tom, I kind of hate removing support for old\nsystems. But I've also come to realize that we often end up supporting\nsystems because there's one PostgreSQL developer who has access and\nsets up a buildfarm member ... which tends to mean that we support all\nthe stuff that lots of people are using, plus a pretty random subset\nof older systems that do funny things and most people can't access to\ndebug any problems that may occur. And that's kind of annoying.\n\n(I don't have a specific proposal for what to do about it.)\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 8 Apr 2022 11:29:59 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Fri, 8 Apr 2022 at 11:30, Robert Haas <robertmhaas@gmail.com> wrote:\n>\n> On Fri, Apr 8, 2022 at 10:12 AM Greg Stark <stark@mit.edu> wrote:\n> > On Thu, 9 Dec 2021 at 23:36, Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> > > I'm not for dropping support for some platform just because it's old.\n> >\n> > I guess I'll have to spin up the Vax again :)\n>\n> This is a pretty good summary of what's wrong with our current\n> deprecation policy.\n\nI didn't intend it that way but, ok.\n\n> Like Tom, I kind of hate removing support for old\n> systems. But I've also come to realize that we often end up supporting\n> systems because there's one PostgreSQL developer who has access and\n> sets up a buildfarm member ... 
which tends to mean that we support all\n> the stuff that lots of people are using, plus a pretty random subset\n> of older systems that do funny things and most people can't access to\n> debug any problems that may occur. And that's kind of annoying.\n\nGenerally I think supporting older systems that do funny things is\nhelpful in avoiding problems that either 1) Can happen on newer\nsystems but rarely 2) Can happen on other systems that people are\nusing but we don't know about and aren't testing and 3) Can happen on\nfuture systems or future compilers and we might not even find out\nabout.\n\nBut that's useful for some things and not for others. Like, it's\nuseful to be sure we don't have odd dependencies on timing quirks of\nthe specific machines that are currently common, or depend on gcc/llvm\ncompiler behaviour that isn't guaranteed. But less so for supporting\nsome quirky filesystem behaviour on Windows 8 that newer Windows\ndoesn't have and Unix guarantees not to have. (Or supporting non-IEEE\nVax FP now that we've decided we just don't any more).\n\n-- \ngreg\n\n\n", "msg_date": "Fri, 8 Apr 2022 11:44:31 -0400", "msg_from": "Greg Stark <stark@mit.edu>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Fri, Apr 8, 2022 at 11:45 AM Greg Stark <stark@mit.edu> wrote:\n> But that's useful for some things and not for others. Like, it's\n> useful to be sure we don't have odd dependencies on timing quirks of\n> the specific machines that are currently common, or depend on gcc/llvm\n> compiler behaviour that isn't guaranteed. But less so for supporting\n> some quirky filesystem behaviour on Windows 8 that newer Windows\n> doesn't have and Unix guarantees not to have. 
(Or supporting non-IEEE\n> Vax FP now that we've decided we just don't any more).\n\nYeah, exactly.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 8 Apr 2022 11:52:20 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Fri, Apr 08, 2022 at 11:44:31AM -0400, Greg Stark wrote:\n> Generally I think supporting older systems that do funny things is\n> helpful in avoiding problems that either 1) Can happen on newer\n> systems but rarely 2) Can happen on other systems that people are\n> using but we don't know about and aren't testing and 3) Can happen on\n> future systems or future compilers and we might not even find out\n> about.\n\nAgreed. I think that things can be usually helpful. Now, I am not\nreally convinced that there is a strong need in running a VAX if you\nare worrying about timing issues so this is a matter of balance. You\ncould get down to the same level of coverage with something as cheap\nas a Raspberry PI or such. There are also configure switches that\nemulate rather non-common behaviors, like --disable-spinlocks or\n--disable-atomics that I'd rather never see gone.\n--\nMichael", "msg_date": "Wed, 13 Apr 2022 16:04:03 +0900", "msg_from": "Michael Paquier <michael@paquier.xyz>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Wed, Apr 13, 2022 at 3:04 AM Michael Paquier <michael@paquier.xyz> wrote:\n> Agreed. I think that things can be usually helpful. Now, I am not\n> really convinced that there is a strong need in running a VAX if you\n> are worrying about timing issues so this is a matter of balance. You\n> could get down to the same level of coverage with something as cheap\n> as a Raspberry PI or such. 
There are also configure switches that\n> emulate rather non-common behaviors, like --disable-spinlocks or\n> --disable-atomics that I'd rather never see gone.\n\nI don't really agree with you about any of this.\n\nA Raspberry Pi running Linux is very similar to any other Linux\nmachine, except with less resources. A machine running a different\noperating system is a totally different thing. I agree that there is\nvalue in supporting other operating systems if those are things people\nmight actually use, because it's good for PostgreSQL to be portable,\nbut it isn't really useful for us to be portable to systems that\nnobody runs any more. I am just old enough to have a bit of nostalgia\nabout the idea of surrendering support for VAX or a PDP-11 or an IRIX\nworkstation, but in practice efforts to port to those platforms are\nefforts for the dedicated hobbyist rather than anything that will make\nPostgreSQL a better piece of software.\n\nSimilarly for spinlocks and atomics. There have been in the past\nsystems that did not have these things, but it seems very unlikely\nthat there will be new systems in the future that don't have these\nthings. And continuing to support those configurations actually adds a\nlot of complexity. The code is weirdly structured so that you can\nemulate atomics with spinlocks, but spinlocks on most systems are\nactually built using atomics, except when we emulate spinlocks with\nsemaphores. It's really kind of a mess, and we could clean things up\nand make them more straightforward if we were willing to decide that\natomics and spinlocks are basic requirements for PostgreSQL to run.\n\nNow I don't know if we should decide that today or at some point in\nthe future, but single-processor architectures are pretty dead.\nEmbedded devices like phones are shipping with multiple cores. Even a\nsingle processor system is probably based on an underlying\narchitecture that is multiprocessor capable. 
Where, outside of a\nmuseum, would you find a system that required --disable-spinlocks or\n--disable-atomics?\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 13 Apr 2022 09:38:56 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Robert Haas <robertmhaas@gmail.com> writes:\n> On Wed, Apr 13, 2022 at 3:04 AM Michael Paquier <michael@paquier.xyz> wrote:\n>> Agreed. I think that things can be usually helpful. Now, I am not\n>> really convinced that there is a strong need in running a VAX if you\n>> are worrying about timing issues so this is a matter of balance. You\n>> could get down to the same level of coverage with something as cheap\n>> as a Raspberry PI or such. There are also configure switches that\n>> emulate rather non-common behaviors, like --disable-spinlocks or\n>> --disable-atomics that I'd rather never see gone.\n\n> I don't really agree with you about any of this.\n\nMeh. I agree that it seems unlikely that anyone will come out with a\nnew processor design that lacks the ability to do spinlocks or atomics.\nIt's substantially more likely though that someone would want those\nconfigure switches temporarily while in the process of porting\nPostgres to a new processor, so that they don't have to make\nabsolutely everything work before they can test anything.\n\nIndependently of that, I think that our interest in weird old\nprocessors is mostly about checking our assumptions about exactly\nwhat processor-dependent facilities look like. 
For example,\nalthough I agree that spinlocks should be possible on everything\nwe care about supporting, I missed the stone tablet on which it is\ngraven that thou shalt use zero for the unlocked state of a spinlock.\nThe main reason I keep my old HPPA dinosaur alive is because it is\n(I think) our only remaining architecture in which that isn't true,\nand I think we need to keep ourselves honest about that sort of\ndetail. Next decade's hot new processor design might do things\ndifferently enough that it matters that we use SpinLockInit()\nnot memset-to-zero. This is not academic either, as we've had\nexactly such bugs in the past.\n\nThe situation for OSes is a bit different, because IMV we generally\nprefer to restrict ourselves to POSIX-compliant system calls,\nand to the extent we can do that all OSes look alike. The reason\nthat Windows is such a grade-A pain in the rear is exactly that\ntheir POSIX compliance sucks, and yet we committed to supporting\nthem anyway. If some new OS that is not POSIX-compliant comes\ndown the pike, I think we're far more likely to decline to support\nit than otherwise.\n\nBut to tie this back to the point of the thread --- anytime we\ncan reasonably start to rely on POSIX behavior in newer versions\nof Windows, I'm for that.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 13 Apr 2022 10:19:44 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Wed, Apr 13, 2022 at 10:19 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> Meh. 
I agree that it seems unlikely that anyone will come out with a\n> new processor design that lacks the ability to do spinlocks or atomics.\n> It's substantially more likely though that someone would want those\n> configure switches temporarily while in the process of porting\n> Postgres to a new processor, so that they don't have to make\n> absolutely everything work before they can test anything.\n\nIt's possible. I don't think it's super-likely. If someone is\nintroducing a new architecture, they're probably going to make getting\nthe Linux kernel and gcc working on it a pretty high priority, and\nthey'll probably make the gcc intrinsics work, too. But you never\nknow. Humans are unpredictable like that.\n\n> Independently of that, I think that our interest in weird old\n> processors is mostly about checking our assumptions about exactly\n> what processor-dependent facilities look like. For example,\n> although I agree that spinlocks should be possible on everything\n> we care about supporting, I missed the stone tablet on which it is\n> graven that thou shalt use zero for the unlocked state of a spinlock.\n> The main reason I keep my old HPPA dinosaur alive is because it is\n> (I think) our only remaining architecture in which that isn't true,\n> and I think we need to keep ourselves honest about that sort of\n> detail. Next decade's hot new processor design might do things\n> differently enough that it matters that we use SpinLockInit()\n> not memset-to-zero. This is not academic either, as we've had\n> exactly such bugs in the past.\n\nHere again, I think it's definitely possible that that could happen,\nbut I don't think it's super-likely. Nobody's really implementing\nspinlocks as a primitive any more; they implement atomics, and you can\ndecide for yourself how to build spinlocks on top of that and what\nvalues you want to use. 
And if you did decide to provide spinlocks but\nnot atomics for some reason, you'd probably use 0 and 1 rather than 17\nand 42 just because otherwise a lot of software wouldn't work on your\nbrand new hardware, which as a hardware manufacturer is a thing you\nreally don't want. We can be as rigorous as we like about this sort of\nthing, but I bet that in 2022 there is a huge amount of code out that\nassumes memset(...., 0, ...) is good enough. And, like, nobody's going\nto be that excited about building a machine where PostgreSQL works\nbecause we've carefully avoided this assumption, but 5000 other\nsoftware packages that haven't been as careful all break.\n\n> The situation for OSes is a bit different, because IMV we generally\n> prefer to restrict ourselves to POSIX-compliant system calls,\n> and to the extent we can do that all OSes look alike. The reason\n> that Windows is such a grade-A pain in the rear is exactly that\n> their POSIX compliance sucks, and yet we committed to supporting\n> them anyway. If some new OS that is not POSIX-compliant comes\n> down the pike, I think we're far more likely to decline to support\n> it than otherwise.\n\nYeah.\n\n> But to tie this back to the point of the thread --- anytime we\n> can reasonably start to rely on POSIX behavior in newer versions\n> of Windows, I'm for that.\n\nSure, makes sense.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 13 Apr 2022 10:38:41 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Hi,\n\nOn 2022-04-13 10:19:44 -0400, Tom Lane wrote:\n> Meh. 
I agree that it seems unlikely that anyone will come out with a\n> new processor design that lacks the ability to do spinlocks or atomics.\n> It's substantially more likely though that someone would want those\n> configure switches temporarily while in the process of porting\n> Postgres to a new processor, so that they don't have to make\n> absolutely everything work before they can test anything.\n\nI still think we ought to provide a compiler intrinsics spinlock\nimplementation for that...\n\n\n> Independently of that, I think that our interest in weird old\n> processors is mostly about checking our assumptions about exactly\n> what processor-dependent facilities look like. For example,\n> although I agree that spinlocks should be possible on everything\n> we care about supporting, I missed the stone tablet on which it is\n> graven that thou shalt use zero for the unlocked state of a spinlock.\n> The main reason I keep my old HPPA dinosaur alive is because it is\n> (I think) our only remaining architecture in which that isn't true,\n> and I think we need to keep ourselves honest about that sort of\n> detail.\n\nThe other thing it currently has is the weird wide spinlock state where\nwe don't know which byte is going to be modified ... I don't think\nthat's likely to be needed again though.\n\n\n> Next decade's hot new processor design might do things\n> differently enough that it matters that we use SpinLockInit()\n> not memset-to-zero. This is not academic either, as we've had\n> exactly such bugs in the past.\n\nFWIW, I'l like to make spinlocks and atomics assert out if they've not\nbeen initialized (which'd include preventing uninitialized use of\nlwlocks). It's easy to accidentally zero out the state or start out\nuninitialized. Right now nothing will complain on platforms created\nafter 1700 or using --disable-spinlocks --disable-atomics. 
That should\nbe caught well before running on the buildfarm...\n\nThen the zero-state assumption wouldn't require continuing to support\nHPPA.\n\n\n> The situation for OSes is a bit different, because IMV we generally\n> prefer to restrict ourselves to POSIX-compliant system calls,\n> and to the extent we can do that all OSes look alike. The reason\n> that Windows is such a grade-A pain in the rear is exactly that\n> their POSIX compliance sucks, and yet we committed to supporting\n> them anyway. If some new OS that is not POSIX-compliant comes\n> down the pike, I think we're far more likely to decline to support\n> it than otherwise.\n\nOur windows support is not in a great state. Part of that is that we\njust plaster random hacks over issues. Which often are only needed on\nwindows version that nobody has access to. As you say that's different\nfrom most of the hackiness to support some random old unix platform,\nwhich most of the time much more localized (with the exception of not\nrelying on threads in some places due to old platforms).\n\n\n> But to tie this back to the point of the thread --- anytime we\n> can reasonably start to rely on POSIX behavior in newer versions\n> of Windows, I'm for that.\n\nYea. Same imo is true for msvc specific compiler oddities. If we can\nsimplify things by requiring a halfway modern msvc version, we shouldn't\nhesitate.\n\nI think it might be worth noting somewhere developer oriented that we're\nok with dropping support in HEAD for windows versions that aren't\n\"fully\" supported anymore. Even if one can procure extended support for\na gazillion or two, they're not going to do that to run PG 19.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Wed, 13 Apr 2022 08:03:06 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." 
}, { "msg_contents": "Andres Freund <andres@anarazel.de> writes:\n> On 2022-04-13 10:19:44 -0400, Tom Lane wrote:\n>> Next decade's hot new processor design might do things\n>> differently enough that it matters that we use SpinLockInit()\n>> not memset-to-zero. This is not academic either, as we've had\n>> exactly such bugs in the past.\n\n> FWIW, I'l like to make spinlocks and atomics assert out if they've not\n> been initialized (which'd include preventing uninitialized use of\n> lwlocks). It's easy to accidentally zero out the state or start out\n> uninitialized. Right now nothing will complain on platforms created\n> after 1700 or using --disable-spinlocks --disable-atomics. That should\n> be caught well before running on the buildfarm...\n\nYeah, even just doing that in --disable-spinlocks builds would be\nenough for the purpose, and be much more accessible to Joe Developer.\n\n> Then the zero-state assumption wouldn't require continuing to support\n> HPPA.\n\nI wouldn't mind retiring that machine once v11 is EOL. (It's also one\nof very few animals testing pre-C99 compilers, so not before then.)\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 13 Apr 2022 11:25:42 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "On Wed, Apr 13, 2022 at 11:03 AM Andres Freund <andres@anarazel.de> wrote:\n> > Next decade's hot new processor design might do things\n> > differently enough that it matters that we use SpinLockInit()\n> > not memset-to-zero. This is not academic either, as we've had\n> > exactly such bugs in the past.\n>\n> FWIW, I'l like to make spinlocks and atomics assert out if they've not\n> been initialized (which'd include preventing uninitialized use of\n> lwlocks). It's easy to accidentally zero out the state or start out\n> uninitialized. 
Right now nothing will complain on platforms created\n> after 1700 or using --disable-spinlocks --disable-atomics. That should\n> be caught well before running on the buildfarm...\n\nI don't understand this bit about platforms created after 1700. Before\n1700, they didn't even have computers.\n\nAm I being really dense here?\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Wed, 13 Apr 2022 11:30:33 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "Hi, \n\nOn April 13, 2022 8:30:33 AM PDT, Robert Haas <robertmhaas@gmail.com> wrote:\n>On Wed, Apr 13, 2022 at 11:03 AM Andres Freund <andres@anarazel.de> wrote:\n>> > Next decade's hot new processor design might do things\n>> > differently enough that it matters that we use SpinLockInit()\n>> > not memset-to-zero. This is not academic either, as we've had\n>> > exactly such bugs in the past.\n>>\n>> FWIW, I'l like to make spinlocks and atomics assert out if they've not\n>> been initialized (which'd include preventing uninitialized use of\n>> lwlocks). It's easy to accidentally zero out the state or start out\n>> uninitialized. Right now nothing will complain on platforms created\n>> after 1700 or using --disable-spinlocks --disable-atomics. That should\n>> be caught well before running on the buildfarm...\n>\n>I don't understand this bit about platforms created after 1700. Before\n>1700, they didn't even have computers.\n>\n>Am I being really dense here?\n\nIt was a sarcastic reference to the age of pa-risc (the only platform detecting zeroed out spinlocks).\n\nAndres\n\n-- \nSent from my Android device with K-9 Mail. Please excuse my brevity.\n\n\n", "msg_date": "Wed, 13 Apr 2022 08:38:29 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." 
}, { "msg_contents": "On Wed, Apr 6, 2022 at 10:40 AM Victor Spirin <v.spirin@postgrespro.ru> wrote:\n> Updated patch: we use the posix semantic features in Windows build 17763\n> and up.\n> We found an issue with this feature on Windows Server 2016 without\n> updates (Windows 1607 Build 14393)\n\nHi Victor,\n\nI rebased and simplified this, and added a lot of tests to be able to\nunderstand what it does. I think all systems that didn't have this\nare now EOL and we don't need to support them in PG16, but perhaps our\n_WIN32_WINNT is not quite high enough (this requires Win10 RS1, which\nitself was EOL'd in 2019); the main question I have now is what\nhappens when you run this on non-NTFS filesystems, and whether we want\nto *require* this to work because the non-POSIX support will probably\nfinish up untested. I posted all that over on a new thread where I am\ntidying up lots of related stuff, and I didn't want to repost the\nproposed testing framework in multiple threads...\n\nhttps://www.postgresql.org/message-id/flat/CA%2BhUKG%2BajSQ_8eu2AogTncOnZ5me2D-Cn66iN_-wZnRjLN%2Bicg%40mail.gmail.com\n\n\n", "msg_date": "Wed, 19 Oct 2022 10:05:17 +1300", "msg_from": "Thomas Munro <thomas.munro@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." }, { "msg_contents": "2022年10月19日(水) 6:06 Thomas Munro <thomas.munro@gmail.com>:\n>\n> On Wed, Apr 6, 2022 at 10:40 AM Victor Spirin <v.spirin@postgrespro.ru> wrote:\n> > Updated patch: we use the posix semantic features in Windows build 17763\n> > and up.\n> > We found an issue with this feature on Windows Server 2016 without\n> > updates (Windows 1607 Build 14393)\n>\n> Hi Victor,\n>\n> I rebased and simplified this, and added a lot of tests to be able to\n> understand what it does. 
I think all systems that didn't have this\n> are now EOL and we don't need to support them in PG16, but perhaps our\n> _WIN32_WINNT is not quite high enough (this requires Win10 RS1, which\n> itself was EOL'd in 2019); the main question I have now is what\n> happens when you run this on non-NTFS filesystems, and whether we want\n> to *require* this to work because the non-POSIX support will probably\n> finish up untested. I posted all that over on a new thread where I am\n> tidying up lots of related stuff, and I didn't want to repost the\n> proposed testing framework in multiple threads...\n>\n> https://www.postgresql.org/message-id/flat/CA%2BhUKG%2BajSQ_8eu2AogTncOnZ5me2D-Cn66iN_-wZnRjLN%2Bicg%40mail.gmail.com\n\nHi\n\nAs Thomas has incorporated this patch into another CommitFest entry\n[1], we'll close\nthe entry for this thread [2]. If Victor or anyone else would like to\nfollow up, it would\nprobably be best to do that on the thread linked above.\n\n[1] https://commitfest.postgresql.org/40/3951/\n[2] https://commitfest.postgresql.org/40/3347/\n\n\nRegards\n\nIan Barwick\n\n\n", "msg_date": "Wed, 30 Nov 2022 21:38:12 +0900", "msg_from": "Ian Lawrence Barwick <barwick@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Atomic rename feature for Windows." } ]
[ { "msg_contents": ">Please find attached a POC patch to do just that.\n\n>The switch to the single-datum tuplesort is done when there is only one\n>attribute, it is byval (to avoid having to deal with copy of the\nreferences\n>everywhere) and we are not in bound mode (to also avoid having to move\nthings\n>around).\nHi, nice results!\n\nI have a few suggestions and questions to your patch:\n\n1. Why do you moved the declaration of variable *plannode?\nI think this is unnecessary, extend the scope.\n\n2. Why do you declare a new variable TupleDesc out_tuple_desc at\nExecInitSort?\nI think this is unnecessary too, maybe I didn't notice something.\n\n3. I inverted the order of check at this line, I think \"!node-bounded\" is\nmore cheap that TupleDescAttr(tupDesc, 0) ->attbyval\n\n4. Once that you changed the order of execution, this test is unlikely that\nhappens, so add unlikely helps the branch.\n\n5. I think that you add a invariant inside the loop\n\"if(node->is_single_val)\"?\nWould not be better two fors?\n\nFor you convenience, I attached a v2 version (with styles changes), please\ntake a look and can you repeat yours tests?\n\nregards,\nRanier Vilela", "msg_date": "Mon, 5 Jul 2021 11:51:59 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Add proper planner support for ORDER BY / DISTINCT aggregates" }, { "msg_contents": "Le lundi 5 juillet 2021, 16:51:59 CEST Ranier Vilela a écrit :\n> >Please find attached a POC patch to do just that.\n> >\n> >The switch to the single-datum tuplesort is done when there is only one\n> >attribute, it is byval (to avoid having to deal with copy of the\n> \n> references\n> \n> >everywhere) and we are not in bound mode (to also avoid having to move\n> \n> things\n> \n> >around).\n> \n> Hi, nice results!\n> \n> I have a few suggestions and questions to your patch:\n\nThank you for those !\n> \n> 1. 
Why do you moved the declaration of variable *plannode?\n> I think this is unnecessary, extend the scope.\n\nSorry, I should have cleaned it up before sending.\n\n> \n> 2. Why do you declare a new variable TupleDesc out_tuple_desc at\n> ExecInitSort?\n> I think this is unnecessary too, maybe I didn't notice something.\n\nSame as the above, thanks for the two.\n> \n> 3. I inverted the order of check at this line, I think \"!node-bounded\" is\n> more cheap that TupleDescAttr(tupDesc, 0) ->attbyval\n\nI'm not sure it matters since it's done once per sort but Ok\n> \n> 4. Once that you changed the order of execution, this test is unlikely that\n> happens, so add unlikely helps the branch.\n\nOk.\n\n> \n> 5. I think that you add a invariant inside the loop\n> \"if(node->is_single_val)\"?\n> Would not be better two fors?\n\nOk for me.\n\n> \n> For you convenience, I attached a v2 version (with styles changes), please\n> take a look and can you repeat yours tests?\n\nTested it quickly, and did not see any change performance wise that cannot be \nattributed to noise on my laptop but it's fine.\n\nThank you for the fixes !\n\n> \n> regards,\n> Ranier Vilela\n\n\n-- \nRonan Dunklau\n\n\n\n\n", "msg_date": "Mon, 05 Jul 2021 17:07:27 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": false, "msg_subject": "Re: Add proper planner support for ORDER BY / DISTINCT aggregates" }, { "msg_contents": "Em seg., 5 de jul. 
de 2021 às 12:07, Ronan Dunklau <ronan.dunklau@aiven.io>\nescreveu:\n\n> Le lundi 5 juillet 2021, 16:51:59 CEST Ranier Vilela a écrit :\n> > >Please find attached a POC patch to do just that.\n> > >\n> > >The switch to the single-datum tuplesort is done when there is only one\n> > >attribute, it is byval (to avoid having to deal with copy of the\n> >\n> > references\n> >\n> > >everywhere) and we are not in bound mode (to also avoid having to move\n> >\n> > things\n> >\n> > >around).\n> >\n> > Hi, nice results!\n> >\n> > I have a few suggestions and questions to your patch:\n>\n> Thank you for those !\n> >\n> > 1. Why do you moved the declaration of variable *plannode?\n> > I think this is unnecessary, extend the scope.\n>\n> Sorry, I should have cleaned it up before sending.\n>\n> >\n> > 2. Why do you declare a new variable TupleDesc out_tuple_desc at\n> > ExecInitSort?\n> > I think this is unnecessary too, maybe I didn't notice something.\n>\n> Same as the above, thanks for the two.\n> >\n> > 3. I inverted the order of check at this line, I think \"!node-bounded\" is\n> > more cheap that TupleDescAttr(tupDesc, 0) ->attbyval\n>\n> I'm not sure it matters since it's done once per sort but Ok\n> >\n> > 4. Once that you changed the order of execution, this test is unlikely\n> that\n> > happens, so add unlikely helps the branch.\n>\n> Ok.\n>\n> >\n> > 5. I think that you add a invariant inside the loop\n> > \"if(node->is_single_val)\"?\n> > Would not be better two fors?\n>\n> Ok for me.\n>\n> >\n> > For you convenience, I attached a v2 version (with styles changes),\n> please\n> > take a look and can you repeat yours tests?\n>\n> Tested it quickly, and did not see any change performance wise that cannot\n> be\n> attributed to noise on my laptop but it's fine.\n>\nThanks for testing again.\n\n\n> Thank you for the fixes !\n>\nYou are welcome.\n\nregards,\nRanier Vilela\n\nEm seg., 5 de jul. 
de 2021 às 12:07, Ronan Dunklau <ronan.dunklau@aiven.io> escreveu:Le lundi 5 juillet 2021, 16:51:59 CEST Ranier Vilela a écrit :\n> >Please find attached a POC patch to do just that.\n> >\n> >The switch to the single-datum tuplesort is done when there is only one\n> >attribute, it is byval (to avoid having to deal with copy of the\n> \n> references\n> \n> >everywhere) and we are not in bound mode (to also avoid having to move\n> \n> things\n> \n> >around).\n> \n> Hi, nice results!\n> \n> I have a few suggestions and questions to your patch:\n\nThank you for those !\n> \n> 1. Why do you moved the declaration of variable *plannode?\n> I think this is unnecessary, extend the scope.\n\nSorry, I should have cleaned it up before sending.\n\n> \n> 2. Why do you declare a new variable TupleDesc out_tuple_desc at\n> ExecInitSort?\n> I think this is unnecessary too, maybe I didn't notice something.\n\nSame as the above, thanks for the two.\n> \n> 3. I inverted the order of check at this line, I think \"!node-bounded\" is\n> more cheap that TupleDescAttr(tupDesc, 0) ->attbyval\n\nI'm not sure it matters since it's done once per sort but Ok\n> \n> 4. Once that you changed the order of execution, this test is unlikely that\n> happens, so add unlikely helps the branch.\n\nOk.\n\n> \n> 5. I think that you add a invariant inside the loop\n> \"if(node->is_single_val)\"?\n> Would not be better two fors?\n\nOk for me.\n\n> \n> For you convenience, I attached a v2 version (with styles changes), please\n> take a look and can you repeat yours tests?\n\nTested it quickly, and did not see any change performance wise that cannot be \nattributed to noise on my laptop but it's fine.Thanks for testing again. \n\nThank you for the fixes !You are welcome.regards,Ranier Vilela", "msg_date": "Mon, 5 Jul 2021 12:39:52 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Add proper planner support for ORDER BY / DISTINCT aggregates" } ]
[ { "msg_contents": "While re-reading this code I found a small typo and fixed it (making\nthe comment more explicit at the same time).\n\nThanks,\nJames", "msg_date": "Mon, 5 Jul 2021 14:54:51 -0400", "msg_from": "James Coleman <jtc331@gmail.com>", "msg_from_op": true, "msg_subject": "Minor typo in generate_useful_gather_paths comment" }, { "msg_contents": "On Tue, 6 Jul 2021 at 06:55, James Coleman <jtc331@gmail.com> wrote:\n> While re-reading this code I found a small typo and fixed it (making\n> the comment more explicit at the same time).\n\nThanks. Pushed (9ee91cc58).\n\nDavid\n\n\n", "msg_date": "Tue, 6 Jul 2021 12:40:02 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Minor typo in generate_useful_gather_paths comment" } ]
[ { "msg_contents": "Hello,\n\nWhile testing the patch \"Add proper planner support for ORDER BY / DISTINCT \naggregates\" [0] I discovered the performance penalty from adding a sort node \nessentially came from not using the single-datum tuplesort optimization in \nExecSort (contrary to the sorting done in ExecAgg).\n\nI originally proposed this patch as a companion in the same thread [1], but \nfollowing James suggestion I'm making a separate thread just for this as the \noptimization is worthwhile independently of David's patch: it looks like we \ncan expect a 2x speedup on a \"select a single ordered column\" case.\n\nThe patch aimed to be as simple as possible: we only turn this optimization on \nwhen the tuple being sorted has only one attribute, it is \"byval\" (so as not \nto incur copies which would be hard to track in the execution tree) and \nunbound (again, not having to deal with copying borrowed datum anywhere).\n\nThe attached patch is originally by me, with some cleanup by Ranier Vilela. \nI'm sending Ranier's version here.\n\n\n[0]: https://commitfest.postgresql.org/33/3164/\n[1]: https://www.postgresql.org/message-id/4480689.ObhdGn8bVM%40aivenronan\n\n-- \nRonan Dunklau", "msg_date": "Tue, 06 Jul 2021 08:15:41 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": true, "msg_subject": "[PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em ter., 6 de jul. 
de 2021 às 03:15, Ronan Dunklau <ronan.dunklau@aiven.io>\nescreveu:\n\n> Hello,\n>\n> While testing the patch \"Add proper planner support for ORDER BY /\n> DISTINCT\n> aggregates\" [0] I discovered the performance penalty from adding a sort\n> node\n> essentially came from not using the single-datum tuplesort optimization in\n> ExecSort (contrary to the sorting done in ExecAgg).\n>\n> I originally proposed this patch as a companion in the same thread [1],\n> but\n> following James suggestion I'm making a separate thread just for this as\n> the\n> optimization is worthwhile independently of David's patch: it looks like\n> we\n> can expect a 2x speedup on a \"select a single ordered column\" case.\n>\n> The patch aimed to be as simple as possible: we only turn this\n> optimization on\n> when the tuple being sorted has only one attribute, it is \"byval\" (so as\n> not\n> to incur copies which would be hard to track in the execution tree) and\n> unbound (again, not having to deal with copying borrowed datum anywhere).\n>\n> The attached patch is originally by me, with some cleanup by Ranier\n> Vilela.\n> I'm sending Ranier's version here.\n>\nNice Ronan.\nBut I think there is some confusion here.\nThe author is not you?\n\nJust to clarify, at Commitfest, it was supposed to be the other way around.\nYou as an author and David as a reviewer.\nI'll put myself as a reviewer too.\n\nregards,\nRanier Vilela\n\nEm ter., 6 de jul. 
de 2021 às 03:15, Ronan Dunklau <ronan.dunklau@aiven.io> escreveu:Hello,\n\nWhile testing the patch \"Add proper planner support for ORDER BY / DISTINCT \naggregates\" [0] I discovered the performance penalty from adding a sort node \nessentially came from not using the single-datum tuplesort optimization in \nExecSort (contrary to the sorting done in ExecAgg).\n\nI originally proposed this patch as a companion in the same thread [1], but \nfollowing James suggestion I'm making a separate thread just for this as the \noptimization is worthwhile independently of David's patch: it looks like we \ncan expect a 2x speedup on a \"select a single ordered column\" case.\n\nThe patch aimed to be as simple as possible: we only turn this optimization on \nwhen the tuple being sorted has only one attribute, it is \"byval\" (so as not \nto incur copies which would be hard to track in the execution tree) and \nunbound (again, not having to deal with copying borrowed datum anywhere).\n\nThe attached patch is originally by me, with some cleanup by Ranier Vilela. \nI'm sending Ranier's version here.Nice Ronan.But I think there is some confusion here.The author is not you?Just to clarify, at Commitfest, it was supposed to be the other way around.You as an author and David as a reviewer.I'll put myself as a reviewer too.regards,Ranier Vilela", "msg_date": "Tue, 6 Jul 2021 08:25:04 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em ter., 6 de jul. de 2021 às 08:25, Ranier Vilela <ranier.vf@gmail.com>\nescreveu:\n\n> Em ter., 6 de jul. 
de 2021 às 03:15, Ronan Dunklau <ronan.dunklau@aiven.io>\n> escreveu:\n>\n>> Hello,\n>>\n>> While testing the patch \"Add proper planner support for ORDER BY /\n>> DISTINCT\n>> aggregates\" [0] I discovered the performance penalty from adding a sort\n>> node\n>> essentially came from not using the single-datum tuplesort optimization\n>> in\n>> ExecSort (contrary to the sorting done in ExecAgg).\n>>\n>> I originally proposed this patch as a companion in the same thread [1],\n>> but\n>> following James suggestion I'm making a separate thread just for this as\n>> the\n>> optimization is worthwhile independently of David's patch: it looks like\n>> we\n>> can expect a 2x speedup on a \"select a single ordered column\" case.\n>>\n>> The patch aimed to be as simple as possible: we only turn this\n>> optimization on\n>> when the tuple being sorted has only one attribute, it is \"byval\" (so as\n>> not\n>> to incur copies which would be hard to track in the execution tree) and\n>> unbound (again, not having to deal with copying borrowed datum anywhere).\n>>\n>> The attached patch is originally by me, with some cleanup by Ranier\n>> Vilela.\n>> I'm sending Ranier's version here.\n>>\n> Nice Ronan.\n> But I think there is some confusion here.\n> The author is not you?\n>\n> Just to clarify, at Commitfest, it was supposed to be the other way around.\n> You as an author and David as a reviewer.\n> I'll put myself as a reviewer too.\n>\nSorry David, my mistake.\nI confused the numbers (id) of Commitfest.\n\nregards,\nRanier Vilela\n\nEm ter., 6 de jul. de 2021 às 08:25, Ranier Vilela <ranier.vf@gmail.com> escreveu:Em ter., 6 de jul. 
de 2021 às 03:15, Ronan Dunklau <ronan.dunklau@aiven.io> escreveu:Hello,\n\nWhile testing the patch \"Add proper planner support for ORDER BY / DISTINCT \naggregates\" [0] I discovered the performance penalty from adding a sort node \nessentially came from not using the single-datum tuplesort optimization in \nExecSort (contrary to the sorting done in ExecAgg).\n\nI originally proposed this patch as a companion in the same thread [1], but \nfollowing James suggestion I'm making a separate thread just for this as the \noptimization is worthwhile independently of David's patch: it looks like we \ncan expect a 2x speedup on a \"select a single ordered column\" case.\n\nThe patch aimed to be as simple as possible: we only turn this optimization on \nwhen the tuple being sorted has only one attribute, it is \"byval\" (so as not \nto incur copies which would be hard to track in the execution tree) and \nunbound (again, not having to deal with copying borrowed datum anywhere).\n\nThe attached patch is originally by me, with some cleanup by Ranier Vilela. 
\nI'm sending Ranier's version here.Nice Ronan.But I think there is some confusion here.The author is not you?Just to clarify, at Commitfest, it was supposed to be the other way around.You as an author and David as a reviewer.I'll put myself as a reviewer too.Sorry David, my mistake.I confused the numbers (id) of Commitfest.regards,Ranier Vilela", "msg_date": "Tue, 6 Jul 2021 08:48:51 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Adding David since this patch is likely a precondition for [1].\n\nOn Tue, Jul 6, 2021 at 2:15 AM Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n>\n> Hello,\n>\n> While testing the patch \"Add proper planner support for ORDER BY / DISTINCT\n> aggregates\" [0] I discovered the performance penalty from adding a sort node\n> essentially came from not using the single-datum tuplesort optimization in\n> ExecSort (contrary to the sorting done in ExecAgg).\n>\n> I originally proposed this patch as a companion in the same thread [1], but\n> following James suggestion I'm making a separate thread just for this as the\n> optimization is worthwhile independently of David's patch: it looks like we\n> can expect a 2x speedup on a \"select a single ordered column\" case.\n>\n> The patch aimed to be as simple as possible: we only turn this optimization on\n> when the tuple being sorted has only one attribute, it is \"byval\" (so as not\n> to incur copies which would be hard to track in the execution tree) and\n> unbound (again, not having to deal with copying borrowed datum anywhere).\n\nThanks again for finding this and working up a patch.\n\nI've taken a look, and while I haven't dug into testing it yet, I have\na few comments.\n\nFirst, the changes are lacking any explanatory comments. 
Probably we\nshould follow how nodeAgg does this and add both comments to the\nExecSort function header as well as specific comments above the \"if\"\naround the new tuplesort_begin_datum explaining the specific\nconditions that are required for the optimization to be useful and\nsafe.\n\nThat leads to a question I had: I don't follow why bounded mode (when\nusing byval) needs to be excluded. Comments should be added if there's\na good reason (as noted above), but maybe it's a case we can handle\nsafely?\n\nA second question: at first glance it's intuitively the case we might\nnot be able to handle byref values. But nodeAgg doesn't seem to have\nthat restriction. What's the difference here?\n\nA few small code observations:\n- In my view the addition of unlikely() in ExecSort is unlikely to be\nof benefit because it's a single call for the entire node's execution\n(not in the tuple loop).\n- It seems clearer to change the \"if (!node->is_single_val)\" to flip\nthe true/false cases so we don't need the negation.\n- I assume there are tests that likely already cover this case, but\nit'd be worth verifying that.\n\nFinally, I believe the same optimization likely ought to be added to\nnodeIncrementalSort. It's less likely the tests there are sufficient\nfor both this and the original case, but we'll see.\n\nThanks,\nJames\n\n1: https://www.postgresql.org/message-id/CAApHDvpHzfo92%3DR4W0%2BxVua3BUYCKMckWAmo-2t_KiXN-wYH%3Dw%40mail.gmail.com\n\n\n", "msg_date": "Tue, 6 Jul 2021 09:19:36 -0400", "msg_from": "James Coleman <jtc331@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em ter., 6 de jul. 
de 2021 às 10:19, James Coleman <jtc331@gmail.com>\nescreveu:\n\n> Adding David since this patch is likely a precondition for [1].\n>\n> On Tue, Jul 6, 2021 at 2:15 AM Ronan Dunklau <ronan.dunklau@aiven.io>\n> wrote:\n> >\n> > Hello,\n> >\n> > While testing the patch \"Add proper planner support for ORDER BY /\n> DISTINCT\n> > aggregates\" [0] I discovered the performance penalty from adding a sort\n> node\n> > essentially came from not using the single-datum tuplesort optimization\n> in\n> > ExecSort (contrary to the sorting done in ExecAgg).\n> >\n> > I originally proposed this patch as a companion in the same thread [1],\n> but\n> > following James suggestion I'm making a separate thread just for this as\n> the\n> > optimization is worthwhile independently of David's patch: it looks like\n> we\n> > can expect a 2x speedup on a \"select a single ordered column\" case.\n> >\n> > The patch aimed to be as simple as possible: we only turn this\n> optimization on\n> > when the tuple being sorted has only one attribute, it is \"byval\" (so as\n> not\n> > to incur copies which would be hard to track in the execution tree) and\n> > unbound (again, not having to deal with copying borrowed datum anywhere).\n>\n> Thanks again for finding this and working up a patch.\n>\n> I've taken a look, and while I haven't dug into testing it yet, I have\n> a few comments.\n>\n> First, the changes are lacking any explanatory comments. Probably we\n> should follow how nodeAgg does this and add both comments to the\n> ExecSort function header as well as specific comments above the \"if\"\n> around the new tuplesort_begin_datum explaining the specific\n> conditions that are required for the optimization to be useful and\n> safe.\n>\n> That leads to a question I had: I don't follow why bounded mode (when\n> using byval) needs to be excluded. 
Comments should be added if there's\n> a good reason (as noted above), but maybe it's a case we can handle\n> safely?\n>\n> A second question: at first glance it's intuitively the case we might\n> not be able to handle byref values. But nodeAgg doesn't seem to have\n> that restriction. What's the difference here?\n>\n> A few small code observations:\n> - In my view the addition of unlikely() in ExecSort is unlikely to be\n> of benefit because it's a single call for the entire node's execution\n> (not in the tuple loop).\n>\nNo objection. And I agree that testing is complex and needs to remain as it\nis.\n\n- It seems clearer to change the \"if (!node->is_single_val)\" to flip\n> the true/false cases so we don't need the negation.\n>\nI think yes, it can be better.\n\nregards,\nRanier Vilela\n\nEm ter., 6 de jul. de 2021 às 10:19, James Coleman <jtc331@gmail.com> escreveu:Adding David since this patch is likely a precondition for [1].\n\nOn Tue, Jul 6, 2021 at 2:15 AM Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n>\n> Hello,\n>\n> While testing the patch \"Add proper planner support for ORDER BY / DISTINCT\n> aggregates\" [0] I discovered the performance penalty from adding a sort node\n> essentially came from not using the single-datum tuplesort optimization in\n> ExecSort (contrary to the sorting done in ExecAgg).\n>\n> I originally proposed this patch as a companion in the same thread [1], but\n> following James suggestion I'm making a separate thread just for this as the\n> optimization is worthwhile independently of David's patch: it looks like we\n> can expect a 2x speedup on a \"select a single ordered column\" case.\n>\n> The patch aimed to be as simple as possible: we only turn this optimization on\n> when the tuple being sorted has only one attribute, it is \"byval\" (so as not\n> to incur copies which would be hard to track in the execution tree) and\n> unbound (again, not having to deal with copying borrowed datum anywhere).\n\nThanks again for finding 
this and working up a patch.\n\nI've taken a look, and while I haven't dug into testing it yet, I have\na few comments.\n\nFirst, the changes are lacking any explanatory comments. Probably we\nshould follow how nodeAgg does this and add both comments to the\nExecSort function header as well as specific comments above the \"if\"\naround the new tuplesort_begin_datum explaining the specific\nconditions that are required for the optimization to be useful and\nsafe.\n\nThat leads to a question I had: I don't follow why bounded mode (when\nusing byval) needs to be excluded. Comments should be added if there's\na good reason (as noted above), but maybe it's a case we can handle\nsafely?\n\nA second question: at first glance it's intuitively the case we might\nnot be able to handle byref values. But nodeAgg doesn't seem to have\nthat restriction. What's the difference here?\n\nA few small code observations:\n- In my view the addition of unlikely() in ExecSort is unlikely to be\nof benefit because it's a single call for the entire node's execution\n(not in the tuple loop).No objection. And I agree that testing is complex and needs to remain as it is. 
\n- It seems clearer to change the \"if (!node->is_single_val)\" to flip\nthe true/false cases so we don't need the negation.I think yes, it can be better.regards,Ranier Vilela", "msg_date": "Tue, 6 Jul 2021 10:36:10 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Tue, Jul 6, 2021 at 6:49 PM James Coleman <jtc331@gmail.com> wrote:\n>\n> Adding David since this patch is likely a precondition for [1].\n>\n> On Tue, Jul 6, 2021 at 2:15 AM Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n> >\n> > Hello,\n> >\n> > While testing the patch \"Add proper planner support for ORDER BY / DISTINCT\n> > aggregates\" [0] I discovered the performance penalty from adding a sort node\n> > essentially came from not using the single-datum tuplesort optimization in\n> > ExecSort (contrary to the sorting done in ExecAgg).\n> >\n> > I originally proposed this patch as a companion in the same thread [1], but\n> > following James suggestion I'm making a separate thread just for this as the\n> > optimization is worthwhile independently of David's patch: it looks like we\n> > can expect a 2x speedup on a \"select a single ordered column\" case.\n> >\n> > The patch aimed to be as simple as possible: we only turn this optimization on\n> > when the tuple being sorted has only one attribute, it is \"byval\" (so as not\n> > to incur copies which would be hard to track in the execution tree) and\n> > unbound (again, not having to deal with copying borrowed datum anywhere).\n>\n> Thanks again for finding this and working up a patch.\n>\n> I've taken a look, and while I haven't dug into testing it yet, I have\n> a few comments.\n>\n> First, the changes are lacking any explanatory comments. 
Probably we\n> should follow how nodeAgg does this and add both comments to the\n> ExecSort function header as well as specific comments above the \"if\"\n> around the new tuplesort_begin_datum explaining the specific\n> conditions that are required for the optimization to be useful and\n> safe.\n>\n> That leads to a question I had: I don't follow why bounded mode (when\n> using byval) needs to be excluded. Comments should be added if there's\n> a good reason (as noted above), but maybe it's a case we can handle\n> safely?\n>\n> A second question: at first glance it's intuitively the case we might\n> not be able to handle byref values. But nodeAgg doesn't seem to have\n> that restriction. What's the difference here?\n>\n\nI think tuplesort_begin_datum, doesn't have any such limitation, it\ncan handle any type of Datum so I think we don't need to consider the\nonly attbyval, we can consider any type of attribute for this\noptimization.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 6 Jul 2021 19:09:38 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Thank you for the review, I will address those shortly, but will answer some \nquestions in the meantime.\n\n> > First, the changes are lacking any explanatory comments. Probably we\n> > should follow how nodeAgg does this and add both comments to the\n> > ExecSort function header as well as specific comments above the \"if\"\n> > around the new tuplesort_begin_datum explaining the specific\n> > conditions that are required for the optimization to be useful and\n> > safe.\n\nDone, since I lifted the restrictions following your questions, there isn't \nmuch left to comment. (see below)\n\n> > \n> > That leads to a question I had: I don't follow why bounded mode (when\n> > using byval) needs to be excluded. 
Comments should be added if there's\n> > a good reason (as noted above), but maybe it's a case we can handle\n> > safely?\n\nI had test failures when trying to move the Datum around when performing a \nbounded sort, but did not look into it at first.\n\nNow I've looked into it, and the switch to a heapsort when using bounded mode \njust unconditionnaly tried to free a tuple that was never there to begin with. \nSo if the SortTuple does not contain an actual tuple, but only a single datum, \ndo not do that. \n\nI've updated the patch to fix this and enable the optimization in the case of \nbounded sort.\n\n> > \n> > A second question: at first glance it's intuitively the case we might\n> > not be able to handle byref values. But nodeAgg doesn't seem to have\n> > that restriction. What's the difference here?\n> \n> I think tuplesort_begin_datum, doesn't have any such limitation, it\n> can handle any type of Datum so I think we don't need to consider the\n> only attbyval, we can consider any type of attribute for this\n> optimization.\n\nI've restricted the optimization to byval types because of the following \ncomment in nodeAgg.c:\n\n\t/*\n\t * Note: if input type is pass-by-ref, the datums returned by the \nsort are\n\t * freshly palloc'd in the per-query context, so we must be careful \nto\n\t * pfree them when they are no longer needed.\n\t */\n\nAs I was not sure how to handle that, I prefered the safety of not enabling \nit. 
Since you both told me it should be safe, I've lifted that restriction \ntoo.\n\n\n> A few small code observations:\n> - In my view the addition of unlikely() in ExecSort is unlikely to be\n> of benefit because it's a single call for the entire node's execution\n> (not in the tuple loop).\n\nDone.\n\n> - It seems clearer to change the \"if (!node->is_single_val)\" to flip\n> the true/false cases so we don't need the negation.\n\nAgreed, done.\n\n> - I assume there are tests that likely already cover this case, but\n> it'd be worth verifying that.\n\nYes many test cases cover that, but maybe it would be better to explictly \ncheck for it on some cases: do you think we could add a debug message that can \nbe checked for ? \n\n> Finally, I believe the same optimization likely ought to be added to\n> nodeIncrementalSort. It's less likely the tests there are sufficient\n> for both this and the original case, but we'll see.\n\nI will look into it, but isn't incrementalsort used to sort tuples on several \nkeys, when they are already sorted on the first ? In that case, I doubt we \nwould ever have a single-valued tuple here, except if there is a projection to \nstrip the tuple from extraneous attributes.\n\n-- \nRonan Dunklau", "msg_date": "Tue, 06 Jul 2021 17:03:32 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Tue, Jul 6, 2021 at 11:03 AM Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n>\n> Thank you for the review, I will address those shortly, but will answer some\n> questions in the meantime.\n>\n> > > First, the changes are lacking any explanatory comments. 
Probably we\n> > > should follow how nodeAgg does this and add both comments to the\n> > > ExecSort function header as well as specific comments above the \"if\"\n> > > around the new tuplesort_begin_datum explaining the specific\n> > > conditions that are required for the optimization to be useful and\n> > > safe.\n>\n> Done, since I lifted the restrictions following your questions, there isn't\n> much left to comment. (see below)\n>\n> > >\n> > > That leads to a question I had: I don't follow why bounded mode (when\n> > > using byval) needs to be excluded. Comments should be added if there's\n> > > a good reason (as noted above), but maybe it's a case we can handle\n> > > safely?\n>\n> I had test failures when trying to move the Datum around when performing a\n> bounded sort, but did not look into it at first.\n>\n> Now I've looked into it, and the switch to a heapsort when using bounded mode\n> just unconditionnaly tried to free a tuple that was never there to begin with.\n> So if the SortTuple does not contain an actual tuple, but only a single datum,\n> do not do that.\n>\n> I've updated the patch to fix this and enable the optimization in the case of\n> bounded sort.\n\nAwesome.\n\n> > > A second question: at first glance it's intuitively the case we might\n> > > not be able to handle byref values. But nodeAgg doesn't seem to have\n> > > that restriction. 
What's the difference here?\n> >\n> > I think tuplesort_begin_datum, doesn't have any such limitation, it\n> > can handle any type of Datum so I think we don't need to consider the\n> > only attbyval, we can consider any type of attribute for this\n> > optimization.\n>\n> I've restricted the optimization to byval types because of the following\n> comment in nodeAgg.c:\n>\n> /*\n> * Note: if input type is pass-by-ref, the datums returned by the\n> sort are\n> * freshly palloc'd in the per-query context, so we must be careful\n> to\n> * pfree them when they are no longer needed.\n> */\n>\n> As I was not sure how to handle that, I prefered the safety of not enabling\n> it. Since you both told me it should be safe, I've lifted that restriction\n> too.\n\nTo be clear, I don't know for certain it's safe [without extra work],\nbut even if it involves some extra manual pfree'ing (a la nodeAgg)\nit's probably worth it. Maybe someone else will weigh in on whether or\nnot anything special is required here to ensure we don't leak memory\n(I haven't looked in detail yet).\n\n> > A few small code observations:\n> > - In my view the addition of unlikely() in ExecSort is unlikely to be\n> > of benefit because it's a single call for the entire node's execution\n> > (not in the tuple loop).\n>\n> Done.\n>\n> > - It seems clearer to change the \"if (!node->is_single_val)\" to flip\n> > the true/false cases so we don't need the negation.\n>\n> Agreed, done.\n\nThanks\n\n> > - I assume there are tests that likely already cover this case, but\n> > it'd be worth verifying that.\n>\n> Yes many test cases cover that, but maybe it would be better to explictly\n> check for it on some cases: do you think we could add a debug message that can\n> be checked for ?\n\nMostly I think we should verify code coverage and _maybe_ add a\nspecific test or two that we know execises this path. 
I don't know\nthat the debug message needs to be matched in the test (probably more\npain than it's worth), but the debug message (\"enabling datum sort\noptimizaton\" or similar) might be good anyway.\n\nI wonder if we need to change costing of sorts for this case. I don't\nlike having to do so, but it's a significant change in speed, so\nprobably should impact what plan gets chosen. Hopefully others will\nweigh on this also.\n\n> > Finally, I believe the same optimization likely ought to be added to\n> > nodeIncrementalSort. It's less likely the tests there are sufficient\n> > for both this and the original case, but we'll see.\n>\n> I will look into it, but isn't incrementalsort used to sort tuples on several\n> keys, when they are already sorted on the first ? In that case, I doubt we\n> would ever have a single-valued tuple here, except if there is a projection to\n> strip the tuple from extraneous attributes.\n\nYes and no. When incremental sort has to do a full sort there will\nalways be at least 2 attributes. But in prefix sort mode (see\nprefixsort_state) only non-presorted columns are sorted (i.e., if\ngiven a,b already sorted by a, then only b is sorted). So the\nprefixsort_state could use this optimization.\n\nJames\n\n\n", "msg_date": "Tue, 6 Jul 2021 11:37:53 -0400", "msg_from": "James Coleman <jtc331@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Le mardi 6 juillet 2021, 17:37:53 CEST James Coleman a écrit :\n> Yes and no. When incremental sort has to do a full sort there will\n> always be at least 2 attributes. But in prefix sort mode (see\n> prefixsort_state) only non-presorted columns are sorted (i.e., if\n> given a,b already sorted by a, then only b is sorted). So the\n> prefixsort_state could use this optimization.\n\nThe optimization is not when we actually sort on a single key, but when we get \na single attribute in / out of the tuplesort. 
Since sorting always add \nresjunk entries for the keys being sorted on, I don't think we can ever end up \nin a situation where the optimization would kick in, since the entries for the \nalready-performed-sort keys will need to be present in the output.\n\nMaybe if instead of adding resjunk entries to the whole query's targetlist, \nsort and incrementalsort nodes were able to do a projection from the input \n(needed tle + resjunk sorting tle) to a tuple containing only the needed tle \non output before actually sorting it, it would be possible, but that would be \nquite a big design change.\n\nIn the meantime I fixed some formatting issues, please find attached a new \npatch.\n\n\n-- \nRonan Dunklau", "msg_date": "Wed, 07 Jul 2021 11:32:03 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Wed, 7 Jul 2021 at 21:32, Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n> In the meantime I fixed some formatting issues, please find attached a new\n> patch.\n\nI started to look at this.\n\nFirst I wondered how often we might be able to apply this\noptimisation, so I ran make check after adding some elog(NOTICE) calls\nto output which method is going to be used just before we do the\ntuplestore_begin_* calls. It looks like there are 614 instances of\nDatum sorts and 4223 of tuple sorts. That's about 14.5% datum sorts.\n223 of the 614 are byval types and the other 391 are byref. Not that\nthe regression tests are a good reflection of the real world, but if\nit were then that's quite a good number of cases to be able to\noptimise.\n\nAs for the patch, just a few things:\n\n1. Can you add the missing braces in this if condition and the else\ncondition that belongs to it.\n\n+ if (node->is_single_val)\n+ for (;;)\n+ {\n\n2. I think it would nicer to name the new is_single_val field\n\"datumSort\" instead. 
To me it seems more clear what it is for.\n\n3. This seems to be a bug fix where byval datum sorts do not properly\nhandle bounded sorts. I think that maybe that should be fixed and\nbackpatched. I don't see anything that says Datum sorts can't be\nbounded and if there were some restriction on that I'd expect\ntuplesort_set_bound() to fail when the Tuplesortstate had been set up\nwith tuplesort_begin_datum().\n\n static void\n free_sort_tuple(Tuplesortstate *state, SortTuple *stup)\n {\n- FREEMEM(state, GetMemoryChunkSpace(stup->tuple));\n- pfree(stup->tuple);\n+ /*\n+ * If the SortTuple is actually only a single Datum, which was not copied\n+ * as it is a byval type, do not try to free it nor account for it in\n+ * memory used.\n+ */\n+ if (stup->tuple)\n+ {\n+ FREEMEM(state, GetMemoryChunkSpace(stup->tuple));\n+ pfree(stup->tuple);\n+ }\n\nI can take this to another thread.\n\nThat's all I have for now.\n\nDavid\n\n\n", "msg_date": "Tue, 13 Jul 2021 01:11:17 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Le lundi 12 juillet 2021, 15:11:17 CEST David Rowley a écrit :\n> On Wed, 7 Jul 2021 at 21:32, Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n> > In the meantime I fixed some formatting issues, please find attached a new\n> > patch.\n> \n> I started to look at this.\n\nThank you ! I'm attaching a new version of the patch taking your remarks into \naccount.\n> \n> First I wondered how often we might be able to apply this\n> optimisation, so I ran make check after adding some elog(NOTICE) calls\n> to output which method is going to be used just before we do the\n> tuplestore_begin_* calls. It looks like there are 614 instances of\n> Datum sorts and 4223 of tuple sorts. That's about 14.5% datum sorts.\n> 223 of the 614 are byval types and the other 391 are byref. 
Not that\n> the regression tests are a good reflection of the real world, but if\n> it were then that's quite a good number of cases to be able to\n> optimise.\n\nThat's an interesting stat.\n\n> \n> As for the patch, just a few things:\n> \n> 1. Can you add the missing braces in this if condition and the else\n> condition that belongs to it.\n> \n> + if (node->is_single_val)\n> + for (;;)\n> + {\n> \n\nDone.\n\n> 2. I think it would nicer to name the new is_single_val field\n> \"datumSort\" instead. To me it seems more clear what it is for.\n\nDone.\n\n> \n> 3. This seems to be a bug fix where byval datum sorts do not properly\n> handle bounded sorts. I think that maybe that should be fixed and\n> backpatched. I don't see anything that says Datum sorts can't be\n> bounded and if there were some restriction on that I'd expect\n> tuplesort_set_bound() to fail when the Tuplesortstate had been set up\n> with tuplesort_begin_datum().\n\nI've kept this as-is for now, but I will remove it from my patch if it is \ndeemed worthy of back-patching in your other thread. \n\nRegards,\n\n-- \nRonan Dunklau", "msg_date": "Mon, 12 Jul 2021 15:59:42 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Tue, 13 Jul 2021 at 01:59, Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n> > 3. This seems to be a bug fix where byval datum sorts do not properly\n> > handle bounded sorts. I think that maybe that should be fixed and\n> > backpatched. 
I don't see anything that says Datum sorts can't be\n> > bounded and if there were some restriction on that I'd expect\n> > tuplesort_set_bound() to fail when the Tuplesortstate had been set up\n> > with tuplesort_begin_datum().\n>\n> I've kept this as-is for now, but I will remove it from my patch if it is\n> deemed worthy of back-patching in your other thread.\n\nI've now pushed that bug fix so it's fine to remove the change to\ntuplesort.c now.\n\nI also did a round of benchmarking on this patch using the attached\nscript. Anyone wanting to run it will need to run make installcheck\nfirst to create the required tables.\n\nOn an AMD machine, I got the following results.\n\nResult in transactions per second.\nTest master v5 patch compare\nTest1 446.1 657.3 147.32%\nTest2 315.8 314.0 99.44%\nTest3 302.3 392.1 129.67%\nTest4 232.7 230.7 99.12%\nTest5 230.0 446.1 194.00%\nTest6 199.5 217.9 109.23%\nTest7 188.7 185.3 98.21%\nTest8 385.4 544.0 141.17%\n\nTests 2, 4, 7 are designed to check if there is any regression from\ndoing the additional run-time checks to see if we're doing datumSort.\nI measured a very small penalty from this. It's most visible in test7\nwith a drop of about 1.8%. Each test did OFFSET 1000000 as I didn't\nwant to measure the overhead of outputting tuples.\n\nAll the other tests show a pretty good gain. Test6 is testing a byref\ntype, so it appears the gains are not just from byval datums.\n\nIt would be good to see the benchmark script run on a few other\nmachines to get an idea if the gains and losses are consistent.\n\nIn theory, we likely could get rid of the small regression by having\ntwo versions of ExecSort() and setting the correct one during\nExecInitSort() by setting the function pointer to the version we want\nto use in sortstate->ss.ps.ExecProcNode. But maybe the small\nregression is not worth going to that trouble over. 
I'm not aware of\nany other executor nodes that have logic like that so maybe it would\nbe a bad idea to introduce something like that.\n\nDavid", "msg_date": "Tue, 13 Jul 2021 15:15:24 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "> I've now pushed that bug fix so it's fine to remove the change to\n> tuplesort.c now.\n\nThanks, I've rebased the patch, please find attached the v6.\n\n> \n> I also did a round of benchmarking on this patch using the attached\n> script. Anyone wanting to run it will need to run make installcheck\n> first to create the required tables.\n\nI've run your benchmark, keeping the best of three runs each time.\nThis is an intel laptop, so as many things are running on it there is a lot of \nnoise... \n\nBoth standard and patched run come from a compilation with gcc -O2. No changes \nhave been done to the default settings.\n\nQuery #\tMaster\tPatched\tVariation\n1\t884\t1627\t184.05%\n2\t364\t375\t103.02%\n3\t568\t783\t137.85%\n4\t296\t297\t100.34%\n5\t421\t484\t114.96%\n6\t359\t408\t113.65%\n7\t237\t251\t105.91%\n8\t806\t1271\t157.69%\n\nSince I didn't reproduce your slowdown at all on the first run, I tried to \nrerun the benchmark several times and for the \"dubious cases\" (2, 4 and 7), \nthe results are too jittery to conclude one way or another in my case. I \ndon't have access to proper hardware, so not sure if that would be useful in \nany way to just run the bench for thousands of xacts instead. I would be \nsurprised the check adds that much to the whole execution though.\n\nI attach a graph similar to yours for reference.\n\n\n-- \nRonan Dunklau", "msg_date": "Tue, 13 Jul 2021 09:19:37 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em ter., 13 de jul. 
de 2021 às 04:19, Ronan Dunklau <ronan.dunklau@aiven.io>\nescreveu:\n\n> > I've now pushed that bug fix so it's fine to remove the change to\n>\n>\n\n> I would be\n> surprised the check adds that much to the whole execution though.\n>\nI think this branch is a misprediction.\nIn most cases is it not datumSort?\nThat's why I would like to use unlikely.\n\nIMO all the tests should all be to verify past behavior first.\n\nregards,\nRanier Vilela\n\nEm ter., 13 de jul. de 2021 às 04:19, Ronan Dunklau <ronan.dunklau@aiven.io> escreveu:> I've now pushed that bug fix so it's fine to remove the change to  I would be \nsurprised the check adds that much to the whole execution though.I think this branch is a misprediction.In most cases is it not datumSort?That's why I would like to use unlikely.IMO all the tests should all be to verify past behavior first.regards,Ranier Vilela", "msg_date": "Tue, 13 Jul 2021 09:05:56 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Wed, 14 Jul 2021 at 00:06, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> Em ter., 13 de jul. de 2021 às 04:19, Ronan Dunklau <ronan.dunklau@aiven.io> escreveu:\n>> I would be\n>> surprised the check adds that much to the whole execution though.\n>\n> I think this branch is a misprediction.\n\nIt could be. I wondered that myself when I saw Ronan's results were\nbetter than mine for 2,4 and 7. However, I think Ronan had quite a\nbit of noise in his results as there's no reason for the speedup in\ntests 2,4 and 7.\n\n> In most cases is it not datumSort?\n\nwho knows. Maybe someone's workload always requires the datum sort.\n\n> That's why I would like to use unlikely.\n\nWe really only use unlikely() in cases where we want to move code out\nof line to a cold area because it's really never executed under normal\ncircumstances. 
We tend to do that for ERROR cases as we don't ever\nreally want to optimise for errors. We also sometimes do it when some\nfunction has a branch to initialise something during the first call.\nThe case in question here does not fit for either of those two cases.\n\n> IMO all the tests should all be to verify past behavior first.\n\nI'm not quire sure what you mean there.\n\nDavid\n\n\n", "msg_date": "Wed, 14 Jul 2021 00:24:03 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em ter., 13 de jul. de 2021 às 09:24, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Wed, 14 Jul 2021 at 00:06, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> >\n> > Em ter., 13 de jul. de 2021 às 04:19, Ronan Dunklau <\n> ronan.dunklau@aiven.io> escreveu:\n> >> I would be\n> >> surprised the check adds that much to the whole execution though.\n> >\n> > I think this branch is a misprediction.\n>\n> It could be. I wondered that myself when I saw Ronan's results were\n> better than mine for 2,4 and 7. However, I think Ronan had quite a\n> bit of noise in his results as there's no reason for the speedup in\n> tests 2,4 and 7.\n\n\n> > In most cases is it not datumSort?\n>\n> who knows. Maybe someone's workload always requires the datum sort.\n>\n> > That's why I would like to use unlikely.\n>\n> We really only use unlikely() in cases where we want to move code out\n> of line to a cold area because it's really never executed under normal\n> circumstances. We tend to do that for ERROR cases as we don't ever\n> really want to optimise for errors. 
We also sometimes do it when some\n> function has a branch to initialise something during the first call.\n> The case in question here does not fit for either of those two cases.\n>\nHum, I understand the usage cases now.\nThanks for the hint.\n\n\n>\n> > IMO all the tests should all be to verify past behavior first.\n>\n> I'm not quire sure what you mean there.\n>\nI'm saying we could help the branch by keeping the same testing logic as\nbefore and not reversing it.\nAttached is a version to demonstrate this, I don't pretend to be v7.\n\nI couldn't find a good phrase to the contrary:\n\"are we *not* using the single value optimization ?\"\n\nI don't have time to take the tests right now.\n\nregards,\nRanier Vilela", "msg_date": "Tue, 13 Jul 2021 09:44:03 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em ter., 13 de jul. de 2021 às 09:44, Ranier Vilela <ranier.vf@gmail.com>\nescreveu:\n\n> Em ter., 13 de jul. de 2021 às 09:24, David Rowley <dgrowleyml@gmail.com>\n> escreveu:\n>\n>> On Wed, 14 Jul 2021 at 00:06, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>> >\n>> > Em ter., 13 de jul. de 2021 às 04:19, Ronan Dunklau <\n>> ronan.dunklau@aiven.io> escreveu:\n>> >> I would be\n>> >> surprised the check adds that much to the whole execution though.\n>> >\n>> > I think this branch is a misprediction.\n>>\n>> It could be. I wondered that myself when I saw Ronan's results were\n>> better than mine for 2,4 and 7. However, I think Ronan had quite a\n>> bit of noise in his results as there's no reason for the speedup in\n>> tests 2,4 and 7.\n>\n>\n>> > In most cases is it not datumSort?\n>>\n>> who knows. 
Maybe someone's workload always requires the datum sort.\n>>\n>> > That's why I would like to use unlikely.\n>>\n>> We really only use unlikely() in cases where we want to move code out\n>> of line to a cold area because it's really never executed under normal\n>> circumstances. We tend to do that for ERROR cases as we don't ever\n>> really want to optimise for errors. We also sometimes do it when some\n>> function has a branch to initialise something during the first call.\n>> The case in question here does not fit for either of those two cases.\n>>\n> Hum, I understand the usage cases now.\n> Thanks for the hint.\n>\n>\n>>\n>> > IMO all the tests should all be to verify past behavior first.\n>>\n>> I'm not quire sure what you mean there.\n>>\n> I'm saying we could help the branch by keeping the same testing logic as\n> before and not reversing it.\n> Attached is a version to demonstrate this, I don't pretend to be v7.\n>\n> I couldn't find a good phrase to the contrary:\n> \"are we *not* using the single value optimization ?\"\n>\n> I don't have time to take the tests right now.\n>\nFinally I had time to benchmark (David's benchsort.sh)\n\nubuntu 64 bits (20.04) 8gb ram SSD 256GB.\nTable with the best results of each.\n\n\n HEAD v6 v7 v7b v6 vs\nmaster v7 vs v6 v7b vs v6\nTest1 288,149636 449,018541 469,757169 550,48505 155,83% 104,62% 122,60%\nTest2 94,766955 95,451406 94,556249 94,718982 100,72% 99,06% 99,23%\nTest3 190,521319 260,279802 259,597067 278,115296 136,61% 99,74% 106,85%\nTest4 78,779344 78,253455 78,114068 77,941482 99,33% 99,82% 99,60%\nTest5 131,362614 142,662223 136,436347 149,639041 108,60% 95,64% 104,89%\nTest6 112,884298 124,181671 115,528328 127,58497 110,01% 93,03% 102,74%\nTest7 69,308587 68,643067 66,10195 69,087544 99,04% 96,30% 100,65%\nTest8 243,674171 364,681142 371,928453 419,259703 149,66% 101,99% 114,97%\n\nI have no idea why v7 failed with test6?\nv6 slowdown with test4 and test7.\nv7b slowdown with test2 and test4, in relation 
with v7.\n\nIf field struct datumSort is not absolutely necessary, I think that v7 will\nbe better.\nAttached the patchs and file results.", "msg_date": "Tue, 13 Jul 2021 14:42:13 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em ter., 13 de jul. de 2021 às 14:42, Ranier Vilela <ranier.vf@gmail.com>\nescreveu:\n\n> Em ter., 13 de jul. de 2021 às 09:44, Ranier Vilela <ranier.vf@gmail.com>\n> escreveu:\n>\n>> Em ter., 13 de jul. de 2021 às 09:24, David Rowley <dgrowleyml@gmail.com>\n>> escreveu:\n>>\n>>> On Wed, 14 Jul 2021 at 00:06, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>>> >\n>>> > Em ter., 13 de jul. de 2021 às 04:19, Ronan Dunklau <\n>>> ronan.dunklau@aiven.io> escreveu:\n>>> >> I would be\n>>> >> surprised the check adds that much to the whole execution though.\n>>> >\n>>> > I think this branch is a misprediction.\n>>>\n>>> It could be. I wondered that myself when I saw Ronan's results were\n>>> better than mine for 2,4 and 7. However, I think Ronan had quite a\n>>> bit of noise in his results as there's no reason for the speedup in\n>>> tests 2,4 and 7.\n>>\n>>\n>>> > In most cases is it not datumSort?\n>>>\n>>> who knows. Maybe someone's workload always requires the datum sort.\n>>>\n>>> > That's why I would like to use unlikely.\n>>>\n>>> We really only use unlikely() in cases where we want to move code out\n>>> of line to a cold area because it's really never executed under normal\n>>> circumstances. We tend to do that for ERROR cases as we don't ever\n>>> really want to optimise for errors. 
We also sometimes do it when some\n>>> function has a branch to initialise something during the first call.\n>>> The case in question here does not fit for either of those two cases.\n>>>\n>> Hum, I understand the usage cases now.\n>> Thanks for the hint.\n>>\n>>\n>>>\n>>> > IMO all the tests should all be to verify past behavior first.\n>>>\n>>> I'm not quire sure what you mean there.\n>>>\n>> I'm saying we could help the branch by keeping the same testing logic as\n>> before and not reversing it.\n>> Attached is a version to demonstrate this, I don't pretend to be v7.\n>>\n>> I couldn't find a good phrase to the contrary:\n>> \"are we *not* using the single value optimization ?\"\n>>\n>> I don't have time to take the tests right now.\n>>\n> Finally I had time to benchmark (David's benchsort.sh)\n>\n> ubuntu 64 bits (20.04) 8gb ram SSD 256GB.\n> Table with the best results of each.\n>\n>\n> HEAD v6 v7 v7b v6 vs\n> master v7 vs v6 v7b vs v6\n> Test1 288,149636 449,018541 469,757169 550,48505 155,83% 104,62% 122,60%\n> Test2 94,766955 95,451406 94,556249 94,718982 100,72% 99,06% 99,23%\n> Test3 190,521319 260,279802 259,597067 278,115296 136,61% 99,74% 106,85%\n> Test4 78,779344 78,253455 78,114068 77,941482 99,33% 99,82% 99,60%\n> Test5 131,362614 142,662223 136,436347 149,639041 108,60% 95,64% 104,89%\n> Test6 112,884298 124,181671 115,528328 127,58497 110,01% 93,03% 102,74%\n> Test7 69,308587 68,643067 66,10195 69,087544 99,04% 96,30% 100,65%\n> Test8 243,674171 364,681142 371,928453 419,259703 149,66% 101,99% 114,97%\n>\n> I have no idea why v7 failed with test6?\n> v6 slowdown with test4 and test7.\n> v7b slowdown with test2 and test4, in relation with v7.\n>\n v7b slowdown with test2 and test4, in relation with *v6*.\n\n\n> If field struct datumSort is not absolutely necessary, I think that v7\n> will be better.\n>\n *v7b* will be better.\n\nSorry for the noise.\n\nregards,\nRanier Vilela\n\nEm ter., 13 de jul. 
de 2021 às 14:42, Ranier Vilela <ranier.vf@gmail.com> escreveu:Em ter., 13 de jul. de 2021 às 09:44, Ranier Vilela <ranier.vf@gmail.com> escreveu:Em ter., 13 de jul. de 2021 às 09:24, David Rowley <dgrowleyml@gmail.com> escreveu:On Wed, 14 Jul 2021 at 00:06, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> Em ter., 13 de jul. de 2021 às 04:19, Ronan Dunklau <ronan.dunklau@aiven.io> escreveu:\n>> I would be\n>> surprised the check adds that much to the whole execution though.\n>\n> I think this branch is a misprediction.\n\nIt could be.  I wondered that myself when I saw Ronan's results were\nbetter than mine for 2,4 and 7.  However, I think Ronan had quite a\nbit of noise in his results as there's no reason for the speedup in\ntests 2,4 and 7. \n\n> In most cases is it not datumSort?\n\nwho knows.  Maybe someone's workload always requires the datum sort.\n\n> That's why I would like to use unlikely.\n\nWe really only use unlikely() in cases where we want to move code out\nof line to a cold area because it's really never executed under normal\ncircumstances. We tend to do that for ERROR cases as we don't ever\nreally want to optimise for errors. We also sometimes do it when some\nfunction has a branch to initialise something during the first call.\nThe case in question here does not fit for either of those two cases.Hum, I understand the usage cases now.Thanks for the hint. 
\n\n> IMO all the tests should all be to verify past behavior first.\n\nI'm not quire sure what you mean there.I'm saying we could help the branch by keeping the same testing logic as before and not reversing it.Attached is a version to demonstrate this, I don't pretend to be v7.I couldn't find a good phrase to the contrary: \"are we *not* using the single value optimization ?\"I don't have time to take the tests right now.Finally I had time to benchmark (David's benchsort.sh)ubuntu 64 bits (20.04) 8gb ram SSD 256GB.Table with the best results of each.\n\n\n\n\n\n\n         HEAD\n           v6\n           v7\n            v7b\n        v6 vs master\n              v7 vs v6\n           v7b vs v6\n\n\nTest1\n288,149636\n449,018541\n469,757169\n550,48505\n155,83%\n104,62%\n122,60%\n\n\nTest2\n94,766955\n95,451406\n94,556249\n94,718982\n100,72%\n99,06%\n99,23%\n\n\nTest3\n190,521319\n260,279802\n259,597067\n278,115296\n136,61%\n99,74%\n106,85%\n\n\nTest4\n78,779344\n78,253455\n78,114068\n77,941482\n99,33%\n99,82%\n99,60%\n\n\nTest5\n131,362614\n142,662223\n136,436347\n149,639041\n108,60%\n95,64%\n104,89%\n\n\nTest6\n112,884298\n124,181671\n115,528328\n127,58497\n110,01%\n93,03%\n102,74%\n\n\nTest7\n69,308587\n68,643067\n66,10195\n69,087544\n99,04%\n96,30%\n100,65%\n\n\nTest8\n243,674171\n364,681142\n371,928453\n419,259703\n149,66%\n101,99%\n114,97%\n\n\nI have no idea why v7 failed with test6?v6 slowdown with test4 and test7.v7b slowdown with test2 and test4, in relation with v7. v7b slowdown with test2 and test4, in relation with *v6*.If field struct datumSort is not absolutely necessary, I think that v7 will be better. 
*v7b* will be better.Sorry for the noise.regards,Ranier Vilela", "msg_date": "Tue, 13 Jul 2021 14:46:17 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Tue, 13 Jul 2021 at 15:15, David Rowley <dgrowleyml@gmail.com> wrote:\n> In theory, we likely could get rid of the small regression by having\n> two versions of ExecSort() and setting the correct one during\n> ExecInitSort() by setting the function pointer to the version we want\n> to use in sortstate->ss.ps.ExecProcNode.\n\nJust to see how it would perform, I tried what I mentioned above. I've\nincluded what I ended up with in the attached POC patch.\n\nI got the following results on my AMD hardware.\n\nTest master v8 patch comparison\nTest1 448.0 671.7 149.9%\nTest2 316.4 317.5 100.3%\nTest3 299.5 381.6 127.4%\nTest4 219.7 229.5 104.5%\nTest5 226.3 254.6 112.5%\nTest6 197.9 217.9 110.1%\nTest7 179.2 185.3 103.4%\nTest8 389.2 544.8 140.0%\n\nThis time I saw no regression on tests 2, 4 and 7.\n\nI looked to see if there was anywhere else in the executor that\nconditionally uses a different exec function in this way and found\nnothing, so I'm not too sure if it's a good idea to start doing this.\n\nIt would be good to get a 2nd opinion about this idea. Also, more\nbenchmark results with v6 and v8 would be good too.\n\nDavid", "msg_date": "Wed, 14 Jul 2021 22:13:45 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em qua., 14 de jul. 
de 2021 às 07:14, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Tue, 13 Jul 2021 at 15:15, David Rowley <dgrowleyml@gmail.com> wrote:\n> > In theory, we likely could get rid of the small regression by having\n> > two versions of ExecSort() and setting the correct one during\n> > ExecInitSort() by setting the function pointer to the version we want\n> > to use in sortstate->ss.ps.ExecProcNode.\n>\n> Just to see how it would perform, I tried what I mentioned above. I've\n> included what I ended up with in the attached POC patch.\n>\n> I got the following results on my AMD hardware.\n>\n> Test master v8 patch comparison\n> Test1 448.0 671.7 149.9%\n> Test2 316.4 317.5 100.3%\n> Test3 299.5 381.6 127.4%\n> Test4 219.7 229.5 104.5%\n> Test5 226.3 254.6 112.5%\n> Test6 197.9 217.9 110.1%\n> Test7 179.2 185.3 103.4%\n> Test8 389.2 544.8 140.0%\n>\nI'm a little surprised by your results.\nTest1 and Test8 look pretty good to me.\nWhat is compiler and environment?\n\nI repeated (3 times) the benchmark with v8 here,\nand the results were not good.\n\n\n HEAD v6 v7b v8\nv6 vs head v8 vs v6 v8 vs v7b\nTest1 288,149636 449,018541 550,48505 468,168165 155,83% 104,26% 85,05%\nTest2 94,766955 95,451406 94,718982 94,800275 100,72% 99,32% 100,09%\nTest3 190,521319 260,279802 278,115296 262,538383 136,61% 100,87% 94,40%\nTest4 78,779344 78,253455 77,941482 78,471546 99,33% 100,28% 100,68%\nTest5 131,362614 142,662223 149,639041 144,849303 108,60% 101,53% 96,80%\nTest6 112,884298 124,181671 127,58497 124,29376 110,01% 100,09% 97,42%\nTest7 69,308587 68,643067 69,087544 69,437312 99,04% 101,16% 100,51%\nTest8 243,674171 364,681142 419,259703 369,239176 149,66% 101,25% 88,07%\n\n\n\n> This time I saw no regression on tests 2, 4 and 7.\n>\n> I looked to see if there was anywhere else in the executor that\n> conditionally uses a different exec function in this way and found\n> nothing, so I'm not too sure if it's a good idea to start doing this.\n>\nSpecialized functions can be a 
way to optimize. The compilers themselves do\nit.\nBut the ExecSortTuple and ExecSortDatum are much more similar,\nwhich can cause maintenance problems.\nI don't think in this case it would be a good idea.\n\n\n>\n> It would be good to get a 2nd opinion about this idea. Also, more\n> benchmark results with v6 and v8 would be good too.\n>\nYeah, another different machine.\nI would like to see other results with v7b.\n\nAttached the file with all results from v8.\n\nregards,\nRanier Vilela", "msg_date": "Wed, 14 Jul 2021 14:55:13 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Wed, Jul 14, 2021 at 6:14 AM David Rowley <dgrowleyml@gmail.com> wrote:\n\n> It would be good to get a 2nd opinion about this idea. Also, more\n> benchmark results with v6 and v8 would be good too.\n\nI tested this on an older Xeon, gcc 8.4 (here median of each test, full\nresults attached):\n\ntest HEAD v6 v8\n\nTest1 588 1007 998\nTest2 198 202 197\nTest3 374 516 512\nTest4 172 165 166\nTest5 255 279 283\nTest6 227 251 251\nTest7 145 147 146\nTest8 474 783 770\n\nTest4 could be a regression, but 2 and 7 look fine here.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 14 Jul 2021 19:30:26 -0400", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Thu, 15 Jul 2021 at 05:55, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> I repeated (3 times) the benchmark with v8 here,\n> and the results were not good.\n\nDo you have any good theories on why the additional branching that's\ndone in v7b vs v8 might cause it to run faster?\n\nDavid\n\n\n", "msg_date": "Thu, 15 Jul 2021 11:43:20 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum 
tuplesort in ExecSort" }, { "msg_contents": "Em qua., 14 de jul. de 2021 às 20:43, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Thu, 15 Jul 2021 at 05:55, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> > I repeated (3 times) the benchmark with v8 here,\n> > and the results were not good.\n>\n> Do you have any good theories on why the additional branching that's\n> done in v7b vs v8 might cause it to run faster?\n\n\nBranch Predictions works with *more* probable path,\notherwise a penalty occurs and the cpu must revert the results.\n\nIn this case it seems to me that most of the time, tuplesort is the path.\nSo as it is tested if it is *datumSort* and the *prediction* fails,\nthe cpu has more work to reverse the wrong path.\n\nTo help the branch, test a more probable case first, anywhere.\nif, switch, etc.\n\nAnother gain is the local variable tupleSort, which is obviously faster\nthan node.\n\nregards,\nRanier Vilela\n\nEm qua., 14 de jul. de 2021 às 20:43, David Rowley <dgrowleyml@gmail.com> escreveu:On Thu, 15 Jul 2021 at 05:55, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> I repeated (3 times) the benchmark with v8 here,\n> and the results were not good.\n\nDo you have any good theories on why the additional branching that's\ndone in v7b vs v8 might cause it to run faster? Branch Predictions works with *more* probable path, otherwise a penalty occurs and the cpu must revert the results. 
In this case it seems to me that most of the time, tuplesort is the path.So as it is tested if it is *datumSort* and the *prediction* fails, the cpu has more work to reverse the wrong path.To help the branch, test a more probable case first, anywhere.if, switch, etc.Another gain is the local variable tupleSort, which is obviously faster than node.regards,Ranier Vilela", "msg_date": "Wed, 14 Jul 2021 21:10:18 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Thu, 15 Jul 2021 at 12:10, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> Em qua., 14 de jul. de 2021 às 20:43, David Rowley <dgrowleyml@gmail.com> escreveu:\n>>\n>> On Thu, 15 Jul 2021 at 05:55, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>> > I repeated (3 times) the benchmark with v8 here,\n>> > and the results were not good.\n>>\n>> Do you have any good theories on why the additional branching that's\n>> done in v7b vs v8 might cause it to run faster?\n>\n>\n> Branch Predictions works with *more* probable path,\n> otherwise a penalty occurs and the cpu must revert the results.\n\nBut, in v8 there is no additional branch, so no branch to mispredict.\nI don't really see how your explanation fits.\n\nIt seems much more likely to me that the results were just noisy. It\nwould be good to see if you can recreate them consistently.\n\nDavid\n\n\n", "msg_date": "Thu, 15 Jul 2021 12:20:58 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em qua., 14 de jul. de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Thu, 15 Jul 2021 at 12:10, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> >\n> > Em qua., 14 de jul. 
de 2021 às 20:43, David Rowley <dgrowleyml@gmail.com>\n> escreveu:\n> >>\n> >> On Thu, 15 Jul 2021 at 05:55, Ranier Vilela <ranier.vf@gmail.com>\n> wrote:\n> >> > I repeated (3 times) the benchmark with v8 here,\n> >> > and the results were not good.\n> >>\n> >> Do you have any good theories on why the additional branching that's\n> >> done in v7b vs v8 might cause it to run faster?\n> >\n> >\n> > Branch Predictions works with *more* probable path,\n> > otherwise a penalty occurs and the cpu must revert the results.\n>\n> But, in v8 there is no additional branch, so no branch to mispredict.\n> I don't really see how your explanation fits.\n>\nIn v8 the branch occurs at :\n+ if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n\ndatumSort is tested first.\n\nCpu time is a more expensive resource.\nAlways is executed two branches, if it is right path, win,\notherwise occurs a penalty time.\n\n\n> It seems much more likely to me that the results were just noisy. It\n> would be good to see if you can recreate them consistently.\n>\nI do.\nCan you please share results with v7b?\n\nregards,\nRanier Vilela\n\nEm qua., 14 de jul. de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com> escreveu:On Thu, 15 Jul 2021 at 12:10, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> Em qua., 14 de jul. 
de 2021 às 20:43, David Rowley <dgrowleyml@gmail.com> escreveu:\n>>\n>> On Thu, 15 Jul 2021 at 05:55, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>> > I repeated (3 times) the benchmark with v8 here,\n>> > and the results were not good.\n>>\n>> Do you have any good theories on why the additional branching that's\n>> done in v7b vs v8 might cause it to run faster?\n>\n>\n> Branch Predictions works with *more* probable path,\n> otherwise a penalty occurs and the cpu must revert the results.\n\nBut, in v8 there is no additional branch, so no branch to mispredict.\nI don't really see how your explanation fits.In v8 the branch occurs at :+\tif (ExecGetResultType(outerPlanState(sortstate))->natts == 1)datumSort is tested first.Cpu time is a more expensive resource.Always is executed two branches, if it is right path, win,otherwise occurs a penalty time.\n\nIt seems much more likely to me that the results were just noisy.  It\nwould be good to see if you can recreate them consistently.I do.Can you please share results with v7b?regards,Ranier Vilela", "msg_date": "Wed, 14 Jul 2021 21:30:38 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Thu, 15 Jul 2021 at 12:30, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> Em qua., 14 de jul. 
de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com> escreveu:\n>> But, in v8 there is no additional branch, so no branch to mispredict.\n>> I don't really see how your explanation fits.\n>\n> In v8 the branch occurs at :\n> + if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n\nYou do know that branch is in a function that's only executed once\nduring executor initialization, right?\n\nDavid\n\n\n", "msg_date": "Thu, 15 Jul 2021 13:21:51 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em qua., 14 de jul. de 2021 às 22:22, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Thu, 15 Jul 2021 at 12:30, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> >\n> > Em qua., 14 de jul. de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com>\n> escreveu:\n> >> But, in v8 there is no additional branch, so no branch to mispredict.\n> >> I don't really see how your explanation fits.\n> >\n> > In v8 the branch occurs at :\n> > + if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n>\n> You do know that branch is in a function that's only executed once\n> during executor initialization, right?\n>\nThe branch prediction should work better.\nI have no idea why it works worse.\n\nI redid all tests:\nnotebook 8GB RAM 256GB SSD\nubuntu 64 bits (20.04)\nclang-12\npowerhigh (charger on)\nnone configuration (all defaults)\n\n\n HEAD v6 v7b v8 v6\nvs head\nv7b vs v6 v8 vs v7b\nTest1 576,868013 940,947236 1090,253859 1016,0443 163,11% 115,87% 93,19%\nTest2 184,748363 177,6254 177,346229 178,230258 96,14% 99,84% 100,50%\nTest3 410,030055 541,889704 605,843924 534,946166 132,16% 111,80% 88,30%\nTest4 153,331752 147,98418 148,010894 147,771155 96,51% 100,02% 99,84%\nTest5 268,97555 301,979647 316,928492 300,94932 112,27% 104,95% 94,96%\nTest6 234,910125 259,71483 269,851427 260,567637 110,56% 103,90% 96,56%\nTest7 142,704153 136,09163 
136,802695 136,935709 95,37% 100,52% 100,10%\nTest8 498,634855 763,482151 867,350046 804,833884 153,11% 113,60% 92,79%\n\nThe values are high here, because now, the tests are made with full power\nof cpu to all patchs!\nI think that more testing is needed with v7b and v8.\n\nAnyway, two functions (ExecSortTuple and ExecSortDatum) are almost equal,\nmaybe not a good idea.\n\nfile results attached.\n\nregards,\nRanier Vilela", "msg_date": "Wed, 14 Jul 2021 23:55:04 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Le jeudi 15 juillet 2021, 01:30:26 CEST John Naylor a écrit :\n> On Wed, Jul 14, 2021 at 6:14 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> > It would be good to get a 2nd opinion about this idea. Also, more\n> > benchmark results with v6 and v8 would be good too.\n> \n\nHello,\n\nThank you for trying this approach in v8 David !\n\nI've decided to test on more \"stable\" hardware, an EC-2 medium instance, \ncompiling with Debian's gcc 8.3. That's still not ideal but a lot better than \na laptop. \n\nTo gather more meaningful results, I ran every pgbench for 30s instead of the \n10 in the initial script provided by David. I ran the full script once for \nHEAD, v6, v8, then a second time for HEAD, v6, v8 to try to eliminate noise \nthat could happen for 90 consecutive seconds, and took for each of those the \nmedian of the 6 runs. It's much less noisy than my previous runs but still \nnot as as stable as I'd like to.\n\nThe results are attached in graph form, as well as the raw data if someone \nwants it.\n\nAs a conclusion, I don't think it's worth it to introduce a separate \nexecprocnode function for that case. 
It is likely the minor difference still \nobserved can be explained to noise, as they fluctuate if you compare the min, \nmax, average or median values from the results.\n\nBest regards,\n\n-- \nRonan Dunklau", "msg_date": "Thu, 15 Jul 2021 12:18:22 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em qui., 15 de jul. de 2021 às 07:18, Ronan Dunklau <ronan.dunklau@aiven.io>\nescreveu:\n\n> Le jeudi 15 juillet 2021, 01:30:26 CEST John Naylor a écrit :\n> > On Wed, Jul 14, 2021 at 6:14 AM David Rowley <dgrowleyml@gmail.com>\n> wrote:\n> > > It would be good to get a 2nd opinion about this idea. Also, more\n> > > benchmark results with v6 and v8 would be good too.\n> >\n>\n> Hello,\n>\n> Thank you for trying this approach in v8 David !\n>\n> I've decided to test on more \"stable\" hardware, an EC-2 medium instance,\n> compiling with Debian's gcc 8.3. That's still not ideal but a lot better\n> than\n> a laptop.\n>\n> To gather more meaningful results, I ran every pgbench for 30s instead of\n> the\n> 10 in the initial script provided by David. I ran the full script once for\n> HEAD, v6, v8, then a second time for HEAD, v6, v8 to try to eliminate\n> noise\n> that could happen for 90 consecutive seconds, and took for each of those\n> the\n> median of the 6 runs. It's much less noisy than my previous runs but\n> still\n> not as as stable as I'd like to.\n>\n> The results are attached in graph form, as well as the raw data if someone\n> wants it.\n>\n> As a conclusion, I don't think it's worth it to introduce a separate\n> execprocnode function for that case. 
It is likely the minor difference\n> still\n> observed can be explained to noise, as they fluctuate if you compare the\n> min,\n> max, average or median values from the results.\n>\nIs there a special reason to not share v7b tests and results?\n\nIMHO he is much more branch friendly.\n\nregards,\nRanier Vilela\n\nEm qui., 15 de jul. de 2021 às 07:18, Ronan Dunklau <ronan.dunklau@aiven.io> escreveu:Le jeudi 15 juillet 2021, 01:30:26 CEST John Naylor a écrit :\n> On Wed, Jul 14, 2021 at 6:14 AM David Rowley <dgrowleyml@gmail.com> wrote:\n> > It would be good to get a 2nd opinion about this idea.  Also, more\n> > benchmark results with v6 and v8 would be good too.\n> \n\nHello,\n\nThank you for trying this approach in v8 David !\n\nI've decided to test on more \"stable\" hardware, an EC-2 medium instance, \ncompiling with Debian's gcc 8.3. That's still not ideal but a lot better than \na laptop. \n\nTo gather more meaningful results, I ran every pgbench for 30s instead of the \n10 in the initial script provided by David. I ran the full script once for \nHEAD, v6, v8, then a second time for HEAD, v6, v8 to try to eliminate noise \nthat could happen for 90 consecutive seconds, and took for each of those the \nmedian of the 6 runs.  It's much less noisy than my previous runs but still \nnot as as stable as I'd like to.\n\nThe results are attached in graph form, as well as the raw data if someone \nwants it.\n\nAs a conclusion, I don't think it's worth it to introduce a separate \nexecprocnode function for that case. 
It is likely the minor difference still \nobserved can be explained to noise, as they fluctuate if you compare the min, \nmax, average or median values from the results.Is there a special reason to not share v7b tests and results?IMHO he is much more branch friendly.regards,Ranier Vilela", "msg_date": "Thu, 15 Jul 2021 09:09:26 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Le jeudi 15 juillet 2021, 14:09:26 CEST Ranier Vilela a écrit :\n> Is there a special reason to not share v7b tests and results?\n> \n\nThe v7b patch is wrong, as it loses the type of tuplesort being used and as \nsuch always tries to fetch results using tuplesort_gettupleslot after the first \ntuple is fetched. \n\n\n-- \nRonan Dunklau\n\n\n\n\n", "msg_date": "Thu, 15 Jul 2021 14:27:38 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em qui., 15 de jul. de 2021 às 09:27, Ronan Dunklau <ronan.dunklau@aiven.io>\nescreveu:\n\n> Le jeudi 15 juillet 2021, 14:09:26 CEST Ranier Vilela a écrit :\n> > Is there a special reason to not share v7b tests and results?\n> >\n>\n> The v7b patch is wrong, as it loses the type of tuplesort being used\n\nI don't see 'node->datumSort' being anywhere else yet.\n\n\n> and as\n> such always tries to fetch results using tuplesort_gettupleslot after the\n> first\n> tuple is fetched.\n\nIs that why it is faster than v6?\n\nregards,\nRanier Vilela\n\nEm qui., 15 de jul. de 2021 às 09:27, Ronan Dunklau <ronan.dunklau@aiven.io> escreveu:Le jeudi 15 juillet 2021, 14:09:26 CEST Ranier Vilela a écrit :\n> Is there a special reason to not share v7b tests and results?\n> \n\nThe v7b patch is wrong, as it loses the type of tuplesort being usedI don't see 'node->datumSort' being anywhere else yet.  
and as \nsuch always tries to fetch results using tuplesort_gettupleslot after the first \ntuple is fetched.Is that why it is faster than v6?regards,Ranier Vilela", "msg_date": "Thu, 15 Jul 2021 09:38:26 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Wed, Jul 14, 2021 at 9:22 PM David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Thu, 15 Jul 2021 at 12:30, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> >\n> > Em qua., 14 de jul. de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com> escreveu:\n> >> But, in v8 there is no additional branch, so no branch to mispredict.\n> >> I don't really see how your explanation fits.\n> >\n> > In v8 the branch occurs at :\n> > + if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n>\n> You do know that branch is in a function that's only executed once\n> during executor initialization, right?\n\nThis is why I have a hard time believing there's a \"real\" change here\nand not the result of either noise or something not really\ncontrollable like executable layout changing.\n\nJames\n\n\n", "msg_date": "Thu, 15 Jul 2021 09:44:14 -0400", "msg_from": "James Coleman <jtc331@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Fri, 16 Jul 2021 at 01:44, James Coleman <jtc331@gmail.com> wrote:\n>\n> On Wed, Jul 14, 2021 at 9:22 PM David Rowley <dgrowleyml@gmail.com> wrote:\n> >\n> > On Thu, 15 Jul 2021 at 12:30, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> > >\n> > > Em qua., 14 de jul. 
de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com> escreveu:\n> > >> But, in v8 there is no additional branch, so no branch to mispredict.\n> > >> I don't really see how your explanation fits.\n> > >\n> > > In v8 the branch occurs at :\n> > > + if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n> >\n> > You do know that branch is in a function that's only executed once\n> > during executor initialization, right?\n>\n> This is why I have a hard time believing there's a \"real\" change here\n> and not the result of either noise or something not really\n> controllable like executable layout changing.\n\nYeah, I think we likely are at the level where layout changes in the\ncompiled code are going to make things hard to measure. I just want\nto make sure we're not going to end up with some regression that's\nactual and not random depending on layout changes of unrelated code.\nI think a branch that's taken consistently *should* be predicted\ncorrectly each time.\n\nAnyway, I think all the comparisons with v7b can safely be ignored. As\nRonan pointed out, v7b has some issues due to it not recording the\nsort method in the executor state that leads to it forgetting which\nmethod it used once we start pulling tuples from it. The reproductions\nof that are it calling tuplesort_gettupleslot() from the 2nd tuple\nonwards regardless of if we've done a datum or tuple sort.\n\nRonan's latest results plus John's make me think there's no need to\nseparate out the node function as I did in v8. However, I do think v6\ncould learn a little from v8. I think I'd rather see the sort method\ndetermined in ExecInitSort() rather than ExecSort(). 
I think\nminimising those few extra instructions in ExecSort() might help the\nL1 instruction cache.\n\nDavid\n\n\n", "msg_date": "Fri, 16 Jul 2021 02:19:23 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em qua., 14 de jul. de 2021 às 22:22, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Thu, 15 Jul 2021 at 12:30, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> >\n> > Em qua., 14 de jul. de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com>\n> escreveu:\n> >> But, in v8 there is no additional branch, so no branch to mispredict.\n> >> I don't really see how your explanation fits.\n> >\n> > In v8 the branch occurs at :\n> > + if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n>\n> You do know that branch is in a function that's only executed once\n> during executor initialization, right?\n>\nThere's a real difference between v8 and v6, if I understood correctly.\n\nv6 the branches is per tuple:\n+ if (tupDesc->natts == 1)\n\nv8 the branches is per state:\n+ if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n\nI think that a big different way to solve the problem.\nOr am I getting it wrong?\n\nIf the sortstate number of attributes is equal to 1, is it worth the same\nfor each tuple?\nCan you explain this, please?\n\nregards,\nRanier Vilela\n\nEm qua., 14 de jul. de 2021 às 22:22, David Rowley <dgrowleyml@gmail.com> escreveu:On Thu, 15 Jul 2021 at 12:30, Ranier Vilela <ranier.vf@gmail.com> wrote:\n>\n> Em qua., 14 de jul. 
de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com> escreveu:\n>> But, in v8 there is no additional branch, so no branch to mispredict.\n>> I don't really see how your explanation fits.\n>\n> In v8 the branch occurs at :\n> + if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n\nYou do know that branch is in a function that's only executed once\nduring executor initialization, right?There's a real difference between v8 and v6, if I understood correctly.v6 the branches is per tuple:+\t\tif (tupDesc->natts == 1)v8 the branches is per state:+\tif (ExecGetResultType(outerPlanState(sortstate))->natts == 1)I think that a big different way to solve the problem.Or am I getting it wrong?If the sortstate number of attributes is equal to 1, is it worth the same for each tuple?Can you explain this, please?regards,Ranier Vilela", "msg_date": "Thu, 15 Jul 2021 11:19:51 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em qui., 15 de jul. de 2021 às 11:19, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Fri, 16 Jul 2021 at 01:44, James Coleman <jtc331@gmail.com> wrote:\n> >\n> > On Wed, Jul 14, 2021 at 9:22 PM David Rowley <dgrowleyml@gmail.com>\n> wrote:\n> > >\n> > > On Thu, 15 Jul 2021 at 12:30, Ranier Vilela <ranier.vf@gmail.com>\n> wrote:\n> > > >\n> > > > Em qua., 14 de jul. 
de 2021 às 21:21, David Rowley <\n> dgrowleyml@gmail.com> escreveu:\n> > > >> But, in v8 there is no additional branch, so no branch to\n> mispredict.\n> > > >> I don't really see how your explanation fits.\n> > > >\n> > > > In v8 the branch occurs at :\n> > > > + if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n> > >\n> > > You do know that branch is in a function that's only executed once\n> > > during executor initialization, right?\n> >\n> > This is why I have a hard time believing there's a \"real\" change here\n> > and not the result of either noise or something not really\n> > controllable like executable layout changing.\n>\n> Yeah, I think we likely are at the level where layout changes in the\n> compiled code are going to make things hard to measure. I just want\n> to make sure we're not going to end up with some regression that's\n> actual and not random depending on layout changes of unrelated code.\n> I think a branch that's taken consistently *should* be predicted\n> correctly each time.\n\n\n> Anyway, I think all the comparisons with v7b can safely be ignored. As\n> Ronan pointed out, v7b has some issues due to it not recording the\n> sort method in the executor state that leads to it forgetting which\n> method it used once we start pulling tuples from it. The reproductions\n> of that are it calling tuplesort_gettupleslot() from the 2nd tuple\n> onwards regardless of if we've done a datum or tuple sort.\n>\nSorry for insisting on this.\nAssuming v7b is doing it the wrong way, which I still don't think it is.\nWhy is it still faster than v6 and v8?\n\nregards,\nRanier Vilela\n\nEm qui., 15 de jul. de 2021 às 11:19, David Rowley <dgrowleyml@gmail.com> escreveu:On Fri, 16 Jul 2021 at 01:44, James Coleman <jtc331@gmail.com> wrote:\n>\n> On Wed, Jul 14, 2021 at 9:22 PM David Rowley <dgrowleyml@gmail.com> wrote:\n> >\n> > On Thu, 15 Jul 2021 at 12:30, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> > >\n> > > Em qua., 14 de jul. 
de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com> escreveu:\n> > >> But, in v8 there is no additional branch, so no branch to mispredict.\n> > >> I don't really see how your explanation fits.\n> > >\n> > > In v8 the branch occurs at :\n> > > + if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n> >\n> > You do know that branch is in a function that's only executed once\n> > during executor initialization, right?\n>\n> This is why I have a hard time believing there's a \"real\" change here\n> and not the result of either noise or something not really\n> controllable like executable layout changing.\n\nYeah, I think we likely are at the level where layout changes in the\ncompiled code are going to make things hard to measure.  I just want\nto make sure we're not going to end up with some regression that's\nactual and not random depending on layout changes of unrelated code.\nI think a branch that's taken consistently *should* be predicted\ncorrectly each time.\n\nAnyway, I think all the comparisons with v7b can safely be ignored. As\nRonan pointed out, v7b has some issues due to it not recording the\nsort method in the executor state that leads to it forgetting which\nmethod it used once we start pulling tuples from it. The reproductions\nof that are it calling tuplesort_gettupleslot() from the 2nd tuple\nonwards regardless of if we've done a datum or tuple sort.Sorry for insisting on this.Assuming v7b is doing it the wrong way, which I still don't think it is.Why is it still faster than v6 and v8?regards,Ranier Vilela", "msg_date": "Thu, 15 Jul 2021 11:45:39 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Le jeudi 15 juillet 2021, 16:19:23 CEST David Rowley a écrit :> \n> Ronan's latest results plus John's make me think there's no need to\n> separate out the node function as I did in v8. 
However, I do think v6\n> could learn a little from v8. I think I'd rather see the sort method\n> determined in ExecInitSort() rather than ExecSort(). I think\n> minimising those few extra instructions in ExecSort() might help the\n> L1 instruction cache.\n> \n\nI'm not sure I understand what you expect from moving that to ExecInitSort ? \nMaybe we should also implement the tuplesort_state initialization in \nExecInitSort ? (not the actual feeding and sorting of course).\n\nPlease find attached a v9 just moving the flag setting to ExecInitSort, and my \napologies if I misunderstood your point.\n\n-- \nRonan Dunklau", "msg_date": "Thu, 15 Jul 2021 16:53:29 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Thu, Jul 15, 2021 at 10:19 AM David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Fri, 16 Jul 2021 at 01:44, James Coleman <jtc331@gmail.com> wrote:\n> >\n> > On Wed, Jul 14, 2021 at 9:22 PM David Rowley <dgrowleyml@gmail.com> wrote:\n> > >\n> > > On Thu, 15 Jul 2021 at 12:30, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> > > >\n> > > > Em qua., 14 de jul. de 2021 às 21:21, David Rowley <dgrowleyml@gmail.com> escreveu:\n> > > >> But, in v8 there is no additional branch, so no branch to mispredict.\n> > > >> I don't really see how your explanation fits.\n> > > >\n> > > > In v8 the branch occurs at :\n> > > > + if (ExecGetResultType(outerPlanState(sortstate))->natts == 1)\n> > >\n> > > You do know that branch is in a function that's only executed once\n> > > during executor initialization, right?\n> >\n> > This is why I have a hard time believing there's a \"real\" change here\n> > and not the result of either noise or something not really\n> > controllable like executable layout changing.\n>\n> Yeah, I think we likely are at the level where layout changes in the\n> compiled code are going to make things hard to measure. 
I just want\n> to make sure we're not going to end up with some regression that's\n> actual and not random depending on layout changes of unrelated code.\n> I think a branch that's taken consistently *should* be predicted\n> correctly each time.\n>\n> Anyway, I think all the comparisons with v7b can safely be ignored. As\n> Ronan pointed out, v7b has some issues due to it not recording the\n> sort method in the executor state that leads to it forgetting which\n> method it used once we start pulling tuples from it. The reproductions\n> of that are it calling tuplesort_gettupleslot() from the 2nd tuple\n> onwards regardless of if we've done a datum or tuple sort.\n>\n> Ronan's latest results plus John's make me think there's no need to\n> separate out the node function as I did in v8. However, I do think v6\n> could learn a little from v8. I think I'd rather see the sort method\n> determined in ExecInitSort() rather than ExecSort(). I think\n> minimising those few extra instructions in ExecSort() might help the\n> L1 instruction cache.\n\nI ran master/v6/v8 tests for 90s each with David's test script on an\nAWS c5n.metal instance (so should be immune to noise neighbor issues).\nHere are comparative results:\n\n Test1 Test2 Test3 Test4 Test5 Test6 Test7 Test8\nv6 68.66% 0.05% 32.21% -0.83% 12.58% 10.42% -1.48% 50.98%\nv8 69.78% -0.44% 32.45% -1.11% 12.01% 10.58% -1.40% 49.30%\n\nSo I see a consistent change in the data, but I don't really see a\ngood explanation for it not being noise. 
Can't prove that yet though.\n\nJames\n\n\n", "msg_date": "Thu, 15 Jul 2021 13:00:36 -0400", "msg_from": "James Coleman <jtc331@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Fri, 16 Jul 2021 at 02:53, Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n>\n> Le jeudi 15 juillet 2021, 16:19:23 CEST David Rowley a écrit :>\n> > Ronan's latest results plus John's make me think there's no need to\n> > separate out the node function as I did in v8. However, I do think v6\n> > could learn a little from v8. I think I'd rather see the sort method\n> > determined in ExecInitSort() rather than ExecSort(). I think\n> > minimising those few extra instructions in ExecSort() might help the\n> > L1 instruction cache.\n> >\n>\n> I'm not sure I understand what you expect from moving that to ExecInitSort ?\n\nThe motivation was to reduce the extra code that's being added to\nExecSort. I checked the assembly of ExecSort on v6 and v9 and v6 was\n544 lines of assembly and v9 is 534 lines.\n\n> Maybe we should also implement the tuplesort_state initialization in\n> ExecInitSort ? (not the actual feeding and sorting of course).\n\nI don't think that would be a good idea. Setting the datumSort does\nnot require any new memory to be allocated. That's not the case for\nthe tuplesort_begin routines. The difference here is that we can\ndelay the memory allocation until we pull the first tuple and if we\ndon't pull any tuples from the outer node then there are no needless\nallocations.\n\n> Please find attached a v9 just moving the flag setting to ExecInitSort, and my\n> apologies if I misunderstood your point.\n\nThat's exactly what I meant. 
Thanks\n\nDavid\n\n\n", "msg_date": "Fri, 16 Jul 2021 14:16:21 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Fri, 16 Jul 2021 at 02:53, Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n> Please find attached a v9 just moving the flag setting to ExecInitSort, and my\n> apologies if I misunderstood your point.\n\nI took this and adjusted a few things and ended up with the attached patch.\n\nThe changes are fairly minor. I made the bracing consistent between\nboth tuplesort_begin calls. I rewrote the comment at the top of\nExecSort() to make it more clear about each method used.\n\nI also adjusted the comment down at the end of ExecSort that was\nmentioning something about tuplesort_gettupleslot returning NULL.\nYour patch didn't touch this, but to me, the comment just looked wrong\nboth before and after the changes. tuplesort_gettupleslot returns\nfalse and sets the slot to empty when it runs out of tuples. Anyway,\nI wrote something there that I think improves that.\n\nI feel like this patch is commit-worthy now. However, I'll leave it\nfor a few days, maybe until after the weekend as there's been a fair\nbit of interest and I imagine someone will have comments to make.\n\nDavid", "msg_date": "Fri, 16 Jul 2021 15:44:49 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em sex., 16 de jul. de 2021 às 00:45, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Fri, 16 Jul 2021 at 02:53, Ronan Dunklau <ronan.dunklau@aiven.io>\n> wrote:\n> > Please find attached a v9 just moving the flag setting to ExecInitSort,\n> and my\n> > apologies if I misunderstood your point.\n>\n> I took this and adjusted a few things and ended up with the attached patch.\n>\n> The changes are fairly minor. 
I made the bracing consistent between\n> both tuplesort_begin calls. I rewrote the comment at the top of\n> ExecSort() to make it more clear about each method used.\n>\nWith relation to the braces, it's still not clear to me which style to\nfollow.\nI gave Ronan directions about it.\nAnd I think maybe, it's still not clear when to use it or not.\n\n\n> I also adjusted the comment down at the end of ExecSort that was\n> mentioning something about tuplesort_gettupleslot returning NULL.\n> Your patch didn't touch this, but to me, the comment just looked wrong\n> both before and after the changes. tuplesort_gettupleslot returns\n> false and sets the slot to empty when it runs out of tuples. Anyway,\n> I wrote something there that I think improves that.\n>\nCan help a little here, but, seems good to me.\n\n\n> I feel like this patch is commit-worthy now. However, I'll leave it\n> for a few days, maybe until after the weekend as there's been a fair\n> bit of interest and I imagine someone will have comments to make.\n>\nA little lack of time.\n\nBut I finally can understand v7b.\nReally struct field is necessary and he fails with the next tuple, ok.\nThe only conclusion I can come to is that he is faster because he fails to\nsort correctly.\nIt's no use being faster and getting wrong results.\n\nSo, +1 from me to commit v10.\n\nThanks for working together.\n\nRanier Vilela\n\nEm sex., 16 de jul. de 2021 às 00:45, David Rowley <dgrowleyml@gmail.com> escreveu:On Fri, 16 Jul 2021 at 02:53, Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n> Please find attached a v9 just moving the flag setting to ExecInitSort, and my\n> apologies if I misunderstood your point.\n\nI took this and adjusted a few things and ended up with the attached patch.\n\nThe changes are fairly minor. I made the bracing consistent between\nboth tuplesort_begin calls. 
I rewrote the comment at the top of\nExecSort() to make it more clear about each method used.With relation to the braces, it's still not clear to me which style to follow.I gave Ronan directions about it.And I think maybe, it's still not clear when to use it or not. \n\nI also adjusted the comment down at the end of ExecSort that was\nmentioning something about tuplesort_gettupleslot returning NULL.\nYour patch didn't touch this, but to me, the comment just looked wrong\nboth before and after the changes. tuplesort_gettupleslot returns\nfalse and sets the slot to empty when it runs out of tuples.  Anyway,\nI wrote something there that I think improves that.Can help a little here, but, seems good to me. \n\nI feel like this patch is commit-worthy now.  However, I'll leave it\nfor a few days, maybe until after the weekend as there's been a fair\nbit of interest and I imagine someone will have comments to make.A little lack of time. But I finally can understand v7b.Really struct field is necessary and he fails with the next tuple, ok.The only conclusion I can come to is that he is faster because he fails to sort correctly.It's no use being faster and getting wrong results.So, +1 from me to commit v10.Thanks for working together.Ranier Vilela", "msg_date": "Fri, 16 Jul 2021 07:22:39 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Thu, Jul 15, 2021 at 11:45 PM David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Fri, 16 Jul 2021 at 02:53, Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n> > Please find attached a v9 just moving the flag setting to ExecInitSort, and my\n> > apologies if I misunderstood your point.\n>\n> I took this and adjusted a few things and ended up with the attached patch.\n>\n> The changes are fairly minor. I made the bracing consistent between\n> both tuplesort_begin calls. 
I rewrote the comment at the top of\n> ExecSort() to make it more clear about each method used.\n>\n> I also adjusted the comment down at the end of ExecSort that was\n> mentioning something about tuplesort_gettupleslot returning NULL.\n> Your patch didn't touch this, but to me, the comment just looked wrong\n> both before and after the changes. tuplesort_gettupleslot returns\n> false and sets the slot to empty when it runs out of tuples. Anyway,\n> I wrote something there that I think improves that.\n>\n> I feel like this patch is commit-worthy now. However, I'll leave it\n> for a few days, maybe until after the weekend as there's been a fair\n> bit of interest and I imagine someone will have comments to make.\n\nThe only remaining question I have is whether or not costing needs to\nchange, given the very significant speedup for datum sort.\n\nJames\n\n\n", "msg_date": "Fri, 16 Jul 2021 09:14:33 -0400", "msg_from": "James Coleman <jtc331@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": " On Sat, 17 Jul 2021 at 01:14, James Coleman <jtc331@gmail.com> wrote:\n> The only remaining question I have is whether or not costing needs to\n> change, given the very significant speedup for datum sort.\n\nI'm looking at cost_tuplesort and the only thing that I think might\nmake sense would be to adjust how the input_bytes value is calculated.\nFor now, that's done with the following function that's used in quite\na number of places.\n\nstatic double\nrelation_byte_size(double tuples, int width)\n{\n return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));\n}\n\nIt seems, at least in the case of Sort, that using SizeofHeapTupleHead\nis just always wrong as it should be SizeofMinimalTupleHeader. I know\nthat's also the case for Memoize too. 
I've not checked the other\nlocations.\n\nThe only thing I can really see that we might do would be not add the\nMAXALIGN(SizeofHeapTupleHeader) when there's just a single column.\nWe'd need to pass down the number of attributes from\ncreate_sort_path() so we'd know when and when not to add that. I'm not\nsaying that we should do this. I'm just saying that I don't really see\nwhat else we might do.\n\nI can imagine another patch might just want to do a complete overhaul\nof all locations that use relation_byte_size(). There are various\nthings that function just does not account for. e.g, the fact that we\nallocate chunks in powers of 2 and that there's a chunk header added\non. Of course, \"width\" is just an estimate, so maybe trying to\ncalculate something too precisely wouldn't be too wise. However,\nthere's a bit of a chicken and the egg problem there as there'd be\nlittle incentive to improve \"width\" unless we started making more\naccurate use of the value.\n\nAnyway, none of the above take into account that the Datum sort is\njust a little faster, The only thing that exists in the existing cost\nmodal that we could use to adjust the cost of an in memory sort is the\ncomparison_cost. The problem there is that the comparison is exactly\nthe same in both Datum and Tuple sorts. The only thing that really\nchanges between Datum and Tuple sort is the fact that we don't make a\nMinimalTuple when doing a Datum sort. The cost modal, unfortunately,\ndoes not account for that. 
That kinda makes me think that we should\ndo nothing as if we start to account for making MemoryTuples then\nwe'll just penalise Tuple sorts and that might cause someone to be\nupset.\n\nDavid\n\n\n", "msg_date": "Sat, 17 Jul 2021 20:36:09 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Le samedi 17 juillet 2021, 10:36:09 CEST David Rowley a écrit :\n> On Sat, 17 Jul 2021 at 01:14, James Coleman <jtc331@gmail.com> wrote:\n> > The only remaining question I have is whether or not costing needs to\n> > change, given the very significant speedup for datum sort.\n> \n> I'm looking at cost_tuplesort and the only thing that I think might\n> make sense would be to adjust how the input_bytes value is calculated.\n> For now, that's done with the following function that's used in quite\n> a number of places.\n> \n> static double\n> relation_byte_size(double tuples, int width)\n> {\n> return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));\n> }\n> \n> It seems, at least in the case of Sort, that using SizeofHeapTupleHead\n> is just always wrong as it should be SizeofMinimalTupleHeader. I know\n> that's also the case for Memoize too. I've not checked the other\n> locations.\n> \n> The only thing I can really see that we might do would be not add the\n> MAXALIGN(SizeofHeapTupleHeader) when there's just a single column.\n> We'd need to pass down the number of attributes from\n> create_sort_path() so we'd know when and when not to add that. I'm not\n> saying that we should do this. I'm just saying that I don't really see\n> what else we might do.\n> \n> I can imagine another patch might just want to do a complete overhaul\n> of all locations that use relation_byte_size(). There are various\n> things that function just does not account for. 
e.g, the fact that we\n> allocate chunks in powers of 2 and that there's a chunk header added\n> on. Of course, \"width\" is just an estimate, so maybe trying to\n> calculate something too precisely wouldn't be too wise. However,\n> there's a bit of a chicken and the egg problem there as there'd be\n> little incentive to improve \"width\" unless we started making more\n> accurate use of the value.\n> \n> Anyway, none of the above take into account that the Datum sort is\n> just a little faster, The only thing that exists in the existing cost\n> modal that we could use to adjust the cost of an in memory sort is the\n> comparison_cost. The problem there is that the comparison is exactly\n> the same in both Datum and Tuple sorts. The only thing that really\n> changes between Datum and Tuple sort is the fact that we don't make a\n> MinimalTuple when doing a Datum sort. The cost modal, unfortunately,\n> does not account for that. That kinda makes me think that we should\n> do nothing as if we start to account for making MemoryTuples then\n> we'll just penalise Tuple sorts and that might cause someone to be\n> upset.\n> \nThank you for taking the time to perform that analysis. 
I agree with you and \ntt looks to me that if we were to start accounting for it, we would have to \nmake the change almost transparent for tuple sorts so that it stays roughly \nthe same, which is impossible since we don't apply the comparison cost to all \ntuples but only to the number of tuples we actually expect to compare.\n\nOn the other hand, if we don't change the sorting cost and it just ends up \nbeing faster in some cases I doubt anyone would complain.\n\n\n-- \nRonan Dunklau\n\n\n\n\n", "msg_date": "Mon, 19 Jul 2021 07:50:16 +0200", "msg_from": "Ronan Dunklau <ronan.dunklau@aiven.io>", "msg_from_op": true, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Sat, Jul 17, 2021 at 4:36 AM David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Sat, 17 Jul 2021 at 01:14, James Coleman <jtc331@gmail.com> wrote:\n> > The only remaining question I have is whether or not costing needs to\n> > change, given the very significant speedup for datum sort.\n>\n> I'm looking at cost_tuplesort and the only thing that I think might\n> make sense would be to adjust how the input_bytes value is calculated.\n> For now, that's done with the following function that's used in quite\n> a number of places.\n>\n> static double\n> relation_byte_size(double tuples, int width)\n> {\n> return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));\n> }\n>\n> It seems, at least in the case of Sort, that using SizeofHeapTupleHead\n> is just always wrong as it should be SizeofMinimalTupleHeader. I know\n> that's also the case for Memoize too. I've not checked the other\n> locations.\n>\n> The only thing I can really see that we might do would be not add the\n> MAXALIGN(SizeofHeapTupleHeader) when there's just a single column.\n> We'd need to pass down the number of attributes from\n> create_sort_path() so we'd know when and when not to add that. I'm not\n> saying that we should do this. 
I'm just saying that I don't really see\n> what else we might do.\n>\n> I can imagine another patch might just want to do a complete overhaul\n> of all locations that use relation_byte_size(). There are various\n> things that function just does not account for. e.g, the fact that we\n> allocate chunks in powers of 2 and that there's a chunk header added\n> on. Of course, \"width\" is just an estimate, so maybe trying to\n> calculate something too precisely wouldn't be too wise. However,\n> there's a bit of a chicken and the egg problem there as there'd be\n> little incentive to improve \"width\" unless we started making more\n> accurate use of the value.\n>\n> Anyway, none of the above take into account that the Datum sort is\n> just a little faster, The only thing that exists in the existing cost\n> modal that we could use to adjust the cost of an in memory sort is the\n> comparison_cost. The problem there is that the comparison is exactly\n> the same in both Datum and Tuple sorts. The only thing that really\n> changes between Datum and Tuple sort is the fact that we don't make a\n> MinimalTuple when doing a Datum sort. The cost modal, unfortunately,\n> does not account for that. That kinda makes me think that we should\n> do nothing as if we start to account for making MemoryTuples then\n> we'll just penalise Tuple sorts and that might cause someone to be\n> upset.\n\nTo be clear up front: I'm in favor of the patch, and I don't want to\nput unnecessary stumbling blocks up for it getting committed. So if we\ndecide to proceed as is, that's fine with me.\n\nBut I'm not sure that the \"cost model, unfortunately, does not account\nfor that\" is entirely accurate. The end of cost_tuplesort contains a\ncost \"per extracted tuple\". It does, however, note that it doesn't\ncharge cpu_tuple_cost, which maybe is what you'd want to fully\nincorporate this into the model. 
But given this run_cost isn't about\naccounting for comparison cost (that's been done earlier) which is the\npart that'd be the same between tuple and datum sort, it seems to me\nthat we could lower the cpu_operator_cost here by something like 10%\nif it's byref and 30% if it's byval?\n\nJames\n\n\n", "msg_date": "Mon, 19 Jul 2021 09:10:46 -0400", "msg_from": "James Coleman <jtc331@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Tue, 20 Jul 2021 at 01:10, James Coleman <jtc331@gmail.com> wrote:\n> To be clear up front: I'm in favor of the patch, and I don't want to\n> put unnecessary stumbling blocks up for it getting committed. So if we\n> decide to proceed as is, that's fine with me.\n\nThanks for making that clear.\n\n> But I'm not sure that the \"cost model, unfortunately, does not account\n> for that\" is entirely accurate. The end of cost_tuplesort contains a\n> cost \"per extracted tuple\". It does, however, note that it doesn't\n> charge cpu_tuple_cost, which maybe is what you'd want to fully\n> incorporate this into the model. 
But given this run_cost isn't about\n> accounting for comparison cost (that's been done earlier) which is the\n> part that'd be the same between tuple and datum sort, it seems to me\n> that we could lower the cpu_operator_cost here by something like 10%\n> if it's byref and 30% if it's byval?\n\nI failed to notice that last part that adds the additional cpu_operator_cost.\n\nThe default cpu_operator_cost is 0.0025, so with the 10k tuple\nbenchmark, that adds an additional charge of 25 to the total cost.\n\nIf we take test 1 from my results on v5 as an example:\n\n> Test1 446.1 657.3 147.32%\n\nLooking at explain for that query:\n\nregression=# explain select two from tenk1 order by two offset 1000000;\n QUERY PLAN\n----------------------------------------------------------------------\n Limit (cost=1133.95..1133.95 rows=1 width=4)\n -> Sort (cost=1108.97..1133.95 rows=9995 width=4)\n Sort Key: two\n -> Seq Scan on tenk1 (cost=0.00..444.95 rows=9995 width=4)\n(4 rows)\n\nIf we want the costs to reflect reality again here then we'd have\nreduce 1133.95 by something like 147.32% (the performance difference).\nThat would bring the cost down to 769.72, which is way more than we\nhave to play with than the 25 that the cpu_operator_cost * tuples\ngives us.\n\nIf we reduced the 25 by 30% in this case, we'd get 17.5 and the total\ncost would become 1126.45. That's not great considering the actual\nperformance indicates that 769.72 would be a better number.\n\nIf we look at John's result for test 1: He saw 588 tps on master and\n998 on v8. 1133.95 / 998.0 * 588.0 = 668.09, so we'd need even more\nto get close to reality on that machine.\n\nMy thoughts are that the small surcharge added at the end of\ncost_tuplesort() is just not enough for us to play with. I think to\nget us closer to fixing this correctly would require a redesign of the\ntuplesort costing entirely. 
I think that would be about an order of\nmagnitude more effort than this patch was, so I really feel like I\ndon't want to do this.\n\nI kinda feel that since the comparison_cost is always just 2.0 *\ncpu_operator_cost regardless of the number of columns in the sort,\nthen if we add too many new smarts to try and properly adjust for this\nnew optimization, unless we do a completly new cost modal for this,\nthen we might as well be putting lipstick on a pig.\n\nIt sounds like James mostly just mentioned the sorting just to ensure\nit was properly considered and does not really feel strongly that it\nneeds to be adjusted. Does anyone else feel that we should be\nadjusting it?\n\nDavid\n\n\n", "msg_date": "Tue, 20 Jul 2021 20:34:51 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "Em ter., 20 de jul. de 2021 às 05:35, David Rowley <dgrowleyml@gmail.com>\nescreveu:\n\n> On Tue, 20 Jul 2021 at 01:10, James Coleman <jtc331@gmail.com> wrote:\n> > To be clear up front: I'm in favor of the patch, and I don't want to\n> > put unnecessary stumbling blocks up for it getting committed. So if we\n> > decide to proceed as is, that's fine with me.\n>\n> Thanks for making that clear.\n>\n> > But I'm not sure that the \"cost model, unfortunately, does not account\n> > for that\" is entirely accurate. The end of cost_tuplesort contains a\n> > cost \"per extracted tuple\". It does, however, note that it doesn't\n> > charge cpu_tuple_cost, which maybe is what you'd want to fully\n> > incorporate this into the model. 
But given this run_cost isn't about\n> > accounting for comparison cost (that's been done earlier) which is the\n> > part that'd be the same between tuple and datum sort, it seems to me\n> > that we could lower the cpu_operator_cost here by something like 10%\n> > if it's byref and 30% if it's byval?\n>\n> I failed to notice that last part that adds the additional\n> cpu_operator_cost.\n>\n> The default cpu_operator_cost is 0.0025, so with the 10k tuple\n> benchmark, that adds an additional charge of 25 to the total cost.\n>\n> If we take test 1 from my results on v5 as an example:\n>\n> > Test1 446.1 657.3 147.32%\n>\n> Looking at explain for that query:\n>\n> regression=# explain select two from tenk1 order by two offset 1000000;\n> QUERY PLAN\n> ----------------------------------------------------------------------\n> Limit (cost=1133.95..1133.95 rows=1 width=4)\n> -> Sort (cost=1108.97..1133.95 rows=9995 width=4)\n> Sort Key: two\n> -> Seq Scan on tenk1 (cost=0.00..444.95 rows=9995 width=4)\n> (4 rows)\n>\n> If we want the costs to reflect reality again here then we'd have\n> reduce 1133.95 by something like 147.32% (the performance difference).\n> That would bring the cost down to 769.72, which is way more than we\n> have to play with than the 25 that the cpu_operator_cost * tuples\n> gives us.\n>\n> If we reduced the 25 by 30% in this case, we'd get 17.5 and the total\n> cost would become 1126.45. That's not great considering the actual\n> performance indicates that 769.72 would be a better number.\n>\n> If we look at John's result for test 1: He saw 588 tps on master and\n> 998 on v8. 1133.95 / 998.0 * 588.0 = 668.09, so we'd need even more\n> to get close to reality on that machine.\n>\n> My thoughts are that the small surcharge added at the end of\n> cost_tuplesort() is just not enough for us to play with. I think to\n> get us closer to fixing this correctly would require a redesign of the\n> tuplesort costing entirely. 
I think that would be about an order of\n> magnitude more effort than this patch was, so I really feel like I\n> don't want to do this.\n>\nI understand that redesign would require a lot of work,\nbut why not do it step by step?\n\n\n> I kinda feel that since the comparison_cost is always just 2.0 *\n> cpu_operator_cost regardless of the number of columns in the sort,\n> then if we add too many new smarts to try and properly adjust for this\n> new optimization, unless we do a completly new cost modal for this,\n> then we might as well be putting lipstick on a pig.\n>\nI think one first step is naming this 2.0?\nDoes this magic number don't have a good name?\n\n\n>\n> It sounds like James mostly just mentioned the sorting just to ensure\n> it was properly considered and does not really feel strongly that it\n> needs to be adjusted. Does anyone else feel that we should be\n> adjusting it?\n>\nI took a look at cost_tuplesort and I think that some small adjustments\ncould be made as part of the improvement process.\nIt is attached.\n1. long is a very problematic type; better int64?\n2. 1024 can be int, not long?\n3. 2 changed all to 2.0 (double)?\n4. 
If disk-based is not needed, IMO can we avoid calling relation_byte_size?\n\nFinally, to at least document (add comments) those conclusions,\nwould be nice, wouldn't it?\n\nregards,\nRanier Vilela", "msg_date": "Tue, 20 Jul 2021 08:28:35 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Fri, 16 Jul 2021 at 15:44, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Fri, 16 Jul 2021 at 02:53, Ronan Dunklau <ronan.dunklau@aiven.io> wrote:\n> > Please find attached a v9 just moving the flag setting to ExecInitSort, and my\n> > apologies if I misunderstood your point.\n>\n> I took this and adjusted a few things and ended up with the attached patch.\n\nAttaching the same v10 patch again so the CF bot picks up the correct\npatch again.\n\nDavid", "msg_date": "Wed, 21 Jul 2021 13:07:35 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Tue, 20 Jul 2021 at 23:28, Ranier Vilela <ranier.vf@gmail.com> wrote:\n> I took a look at cost_tuplesort and I think that some small adjustments could be made as part of the improvement process.\n> It is attached.\n> 1. long is a very problematic type; better int64?\n> 2. 1024 can be int, not long?\n> 3. 2 changed all to 2.0 (double)?\n> 4. If disk-based is not needed, IMO can we avoid calling relation_byte_size?\n>\n> Finally, to at least document (add comments) those conclusions,\n> would be nice, wouldn't it?\n\nI don't think there's anything useful here. If you think otherwise,\nplease take it to another thread. Also, I'd recommend at least\ncompiling any patches you send to -hackers in the future. Going by the\nCF bot, this one does not.\n\nYou might also want to read up on type promotion rules in C. Your\nsort_mem calculation change does not do what you think it does. 
Class\nit as homework to figure out what's wrong with it. No need to report\nyour findings here. Just thought it would be useful for you to learn\nthose things.\n\nDavid\n\n\n", "msg_date": "Wed, 21 Jul 2021 13:15:19 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Tue, Jul 20, 2021 at 4:35 AM David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Tue, 20 Jul 2021 at 01:10, James Coleman <jtc331@gmail.com> wrote:\n> > To be clear up front: I'm in favor of the patch, and I don't want to\n> > put unnecessary stumbling blocks up for it getting committed. So if we\n> > decide to proceed as is, that's fine with me.\n>\n> Thanks for making that clear.\n>\n> > But I'm not sure that the \"cost model, unfortunately, does not account\n> > for that\" is entirely accurate. The end of cost_tuplesort contains a\n> > cost \"per extracted tuple\". It does, however, note that it doesn't\n> > charge cpu_tuple_cost, which maybe is what you'd want to fully\n> > incorporate this into the model. 
But given this run_cost isn't about\n> > accounting for comparison cost (that's been done earlier) which is the\n> > part that'd be the same between tuple and datum sort, it seems to me\n> > that we could lower the cpu_operator_cost here by something like 10%\n> > if it's byref and 30% if it's byval?\n>\n> I failed to notice that last part that adds the additional cpu_operator_cost.\n>\n> The default cpu_operator_cost is 0.0025, so with the 10k tuple\n> benchmark, that adds an additional charge of 25 to the total cost.\n>\n> If we take test 1 from my results on v5 as an example:\n>\n> > Test1 446.1 657.3 147.32%\n>\n> Looking at explain for that query:\n>\n> regression=# explain select two from tenk1 order by two offset 1000000;\n> QUERY PLAN\n> ----------------------------------------------------------------------\n> Limit (cost=1133.95..1133.95 rows=1 width=4)\n> -> Sort (cost=1108.97..1133.95 rows=9995 width=4)\n> Sort Key: two\n> -> Seq Scan on tenk1 (cost=0.00..444.95 rows=9995 width=4)\n> (4 rows)\n>\n> If we want the costs to reflect reality again here then we'd have\n> reduce 1133.95 by something like 147.32% (the performance difference).\n> That would bring the cost down to 769.72, which is way more than we\n> have to play with than the 25 that the cpu_operator_cost * tuples\n> gives us.\n>\n> If we reduced the 25 by 30% in this case, we'd get 17.5 and the total\n> cost would become 1126.45. That's not great considering the actual\n> performance indicates that 769.72 would be a better number.\n>\n> If we look at John's result for test 1: He saw 588 tps on master and\n> 998 on v8. 1133.95 / 998.0 * 588.0 = 668.09, so we'd need even more\n> to get close to reality on that machine.\n>\n> My thoughts are that the small surcharge added at the end of\n> cost_tuplesort() is just not enough for us to play with. I think to\n> get us closer to fixing this correctly would require a redesign of the\n> tuplesort costing entirely. 
I think that would be about an order of\n> magnitude more effort than this patch was, so I really feel like I\n> don't want to do this.\n>\n> I kinda feel that since the comparison_cost is always just 2.0 *\n> cpu_operator_cost regardless of the number of columns in the sort,\n> then if we add too many new smarts to try and properly adjust for this\n> new optimization, unless we do a completly new cost modal for this,\n> then we might as well be putting lipstick on a pig.\n>\n> It sounds like James mostly just mentioned the sorting just to ensure\n> it was properly considered and does not really feel strongly that it\n> needs to be adjusted. Does anyone else feel that we should be\n> adjusting it?\n\nThanks for doing the math measuring how much we could impact things.\n\nI'm +lots on getting this committed as is.\n\nThanks all for your work on the improvement!\n\nJames\n\n\n", "msg_date": "Tue, 20 Jul 2021 21:39:14 -0400", "msg_from": "James Coleman <jtc331@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Wed, 21 Jul 2021 at 13:39, James Coleman <jtc331@gmail.com> wrote:\n> Thanks for doing the math measuring how much we could impact things.\n>\n> I'm +lots on getting this committed as is.\n\nOk good. 
I plan on taking a final look at the v10 patch tomorrow\nmorning NZ time (about 12 hours from now) and if all is well, I'll\npush it.\n\nIf anyone feels differently, please let me know before then.\n\nDavid\n\n\n", "msg_date": "Wed, 21 Jul 2021 22:09:54 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "From: David Rowley <dgrowleyml@gmail.com>\r\n> On Wed, 21 Jul 2021 at 13:39, James Coleman <jtc331@gmail.com> wrote:\r\n> > Thanks for doing the math measuring how much we could impact things.\r\n> >\r\n> > I'm +lots on getting this committed as is.\r\n> \r\n> Ok good. I plan on taking a final look at the v10 patch tomorrow morning NZ\r\n> time (about 12 hours from now) and if all is well, I'll push it.\r\n> \r\n> If anyone feels differently, please let me know before then.\r\nHi,\r\n\r\nI noticed a minor thing about the v10 patch.\r\n\r\n-\r\n-\t\tfor (;;)\r\n+\t\tif (node->datumSort)\r\n \t\t{\r\n-\t\t\tslot = ExecProcNode(outerNode);\r\n-\r\n-\t\t\tif (TupIsNull(slot))\r\n-\t\t\t\tbreak;\r\n-\r\n-\t\t\ttuplesort_puttupleslot(tuplesortstate, slot);\r\n+\t\t\tfor (;;)\r\n+\t\t\t{\r\n+\t\t\t\tslot = ExecProcNode(outerNode);\r\n+\r\n+\t\t\t\tif (TupIsNull(slot))\r\n+\t\t\t\t\tbreak;\r\n+\t\t\t\tslot_getsomeattrs(slot, 1);\r\n+\t\t\t\ttuplesort_putdatum(tuplesortstate,\r\n+\t\t\t\t\t\t\t\t slot->tts_values[0],\r\n+\t\t\t\t\t\t\t\t slot->tts_isnull[0]);\r\n+\t\t\t}\r\n+\t\t}\r\n+\t\telse\r\n+\t\t{\r\n+\t\t\tfor (;;)\r\n+\t\t\t{\r\n+\t\t\t\tslot = ExecProcNode(outerNode);\r\n+\r\n+\t\t\t\tif (TupIsNull(slot))\r\n+\t\t\t\t\tbreak;\r\n+\t\t\t\ttuplesort_puttupleslot(tuplesortstate, slot);\r\n+\t\t\t}\r\n\r\nThe above seems can be shorter like the following ?\r\n\r\nfor (;;)\r\n{\r\n\tslot = ExecProcNode(outerNode);\r\n\tif (TupIsNull(slot))\r\n\t\tbreak;\r\n\tif (node->datumSort)\r\n\t{\r\n\t\tslot_getsomeattrs(slot, 
1);\r\n\t\ttuplesort_putdatum(tuplesortstate,\r\n\t\t\t\t\tslot->tts_values[0],\r\n\t\t\t\t\tslot->tts_isnull[0]);\r\n\t}\r\n\telse\r\n\t\ttuplesort_puttupleslot(tuplesortstate, slot);\r\n}\r\n\r\nBest regards,\r\nhouzj\r\n\r\n", "msg_date": "Thu, 22 Jul 2021 00:27:40 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Thu, 22 Jul 2021 at 12:27, houzj.fnst@fujitsu.com\n<houzj.fnst@fujitsu.com> wrote:\n> The above seems can be shorter like the following ?\n>\n> for (;;)\n> {\n> slot = ExecProcNode(outerNode);\n> if (TupIsNull(slot))\n> break;\n> if (node->datumSort)\n> {\n> slot_getsomeattrs(slot, 1);\n> tuplesort_putdatum(tuplesortstate,\n> slot->tts_values[0],\n> slot->tts_isnull[0]);\n> }\n> else\n> tuplesort_puttupleslot(tuplesortstate, slot);\n> }\n\nI don't think that's a good change. It puts the branch inside the\nloop the pulls all tuples from the subplan. Given the loop is likely\nto be very hot combined with the fact that it's so simple, I'd much\nrather have two separate loops to keep the extra branch outside the\nloop. 
It's true the branch predictor is likely to get the prediction\ncorrect on each iteration, but unless the compiler rewrites this into\ntwo loops then the comparison and jump must be done per loop.\n\nDavid\n\n\n", "msg_date": "Thu, 22 Jul 2021 12:38:25 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On July 22, 2021 8:38 AM David Rowley <dgrowleyml@gmail.com>\r\n> On Thu, 22 Jul 2021 at 12:27, houzj.fnst@fujitsu.com <houzj.fnst@fujitsu.com>\r\n> wrote:\r\n> > The above seems can be shorter like the following ?\r\n> >\r\n> > for (;;)\r\n> > {\r\n> > slot = ExecProcNode(outerNode);\r\n> > if (TupIsNull(slot))\r\n> > break;\r\n> > if (node->datumSort)\r\n> > {\r\n> > slot_getsomeattrs(slot, 1);\r\n> > tuplesort_putdatum(tuplesortstate,\r\n> > slot->tts_values[0],\r\n> > slot->tts_isnull[0]);\r\n> > }\r\n> > else\r\n> > tuplesort_puttupleslot(tuplesortstate, slot); }\r\n> \r\n> I don't think that's a good change. It puts the branch inside the loop the pulls\r\n> all tuples from the subplan. Given the loop is likely to be very hot combined\r\n> with the fact that it's so simple, I'd much rather have two separate loops to\r\n> keep the extra branch outside the loop. It's true the branch predictor is likely\r\n> to get the prediction correct on each iteration, but unless the compiler\r\n> rewrites this into two loops then the comparison and jump must be done per\r\n> loop.\r\n\r\nAh, you are right, I missed that. 
Thanks for the explanation.\r\n\r\nBest regards,\r\nhouzj\r\n", "msg_date": "Thu, 22 Jul 2021 00:53:45 +0000", "msg_from": "\"houzj.fnst@fujitsu.com\" <houzj.fnst@fujitsu.com>", "msg_from_op": false, "msg_subject": "RE: [PATCH] Use optimized single-datum tuplesort in ExecSort" }, { "msg_contents": "On Wed, 21 Jul 2021 at 22:09, David Rowley <dgrowleyml@gmail.com> wrote:\n>\n> On Wed, 21 Jul 2021 at 13:39, James Coleman <jtc331@gmail.com> wrote:\n> > Thanks for doing the math measuring how much we could impact things.\n> >\n> > I'm +lots on getting this committed as is.\n>\n> Ok good. I plan on taking a final look at the v10 patch tomorrow\n> morning NZ time (about 12 hours from now) and if all is well, I'll\n> push it.\n\nPushed.\n\nDavid\n\n\n", "msg_date": "Thu, 22 Jul 2021 14:04:04 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH] Use optimized single-datum tuplesort in ExecSort" } ]
[ { "msg_contents": "Hi all,\r\n\r\n\r\nWhen I read the source code file src/backend/access/transam/xloginsert.c, I get something confused me.\r\nIn the function XLogSaveBufferForHint, the flags are always REGBUF_FORCE_IMAGE which means it is always need backups.\r\nIs it right? Why do not check the full_page_writes?\r\n\r\n\r\n\r\n\r\n--\r\nZhang Wenjie", "msg_date": "Tue, 6 Jul 2021 17:58:15 +0800", "msg_from": "\"=?ISO-8859-1?B?endq?=\" <757634191@qq.com>", "msg_from_op": true, "msg_subject": "Why is XLOG_FPI_FOR_HINT always need backups?" }, { "msg_contents": "\nOn Tue, 06 Jul 2021 at 17:58, zwj <757634191@qq.com> wrote:\n> Hi all,\n>\n> When I read the source code file src/backend/access/transam/xloginsert.c, I get something confused me.\n> In the function XLogSaveBufferForHint, the flags are always REGBUF_FORCE_IMAGE which means it is always need backups.\n> Is it right? Why do not check the full_page_writes?\n\nThe documentation [1] says:\n\n------------------------------------------------------------------------------\nwal_log_hints (boolean)\nWhen this parameter is on, the PostgreSQL server writes the entire content of\neach disk page to WAL during the first modification of that page after a\ncheckpoint, even for non-critical modifications of so-called hint bits.\n------------------------------------------------------------------------------\n\nDoes that mean whether the full_page_writes enable or not, if the wal_log_hints\nenabled, we always write the entire content of each disk page to WAL? If I'm\nright, should we mention this in wal_log_hints?\n\n[1] https://www.postgresql.org/docs/current/runtime-config-wal.html\n\n-- \nRegrads,\nJapin Li.\nChengDu WenWu Information Technology Co.,Ltd.\n\n\n", "msg_date": "Tue, 06 Jul 2021 19:04:25 +0800", "msg_from": "Japin Li <japinli@hotmail.com>", "msg_from_op": false, "msg_subject": "Re: Why is XLOG_FPI_FOR_HINT always need backups?" } ]
[ { "msg_contents": "Thank you for reply. \r\nYou are right and the PostgreSQL server writes the entire content of each disk page to WAL during the first modification of that page after a\r\ncheckpoint while data checksum is on. \r\n\r\n\r\nBut I wonder whether it is necessary or not while my file system can protect the blocks of database to be torn. And I read a comment in function&nbsp;MarkBufferDirtyHint:\r\n```\r\n/*\r\n&nbsp;* If we need to protect hint bit updates from torn writes, WAL-log a\r\n&nbsp;* full page image of the page. This full page image is only necessary\r\n&nbsp;* if the hint bit update is the first change to the page since the\r\n&nbsp;* last checkpoint.\r\n&nbsp;*\r\n&nbsp;* We don't check full_page_writes here because that logic is included\r\n&nbsp;* when we call XLogInsert() since the value changes dynamically.\r\n&nbsp;*/\r\n\r\n\r\n\r\n```\r\nHowever, the code tell me it has nothing to do with full_page_writes. I can't figure it out.&nbsp;\r\n\r\n\r\n--\r\nZhang Wenjie\r\n\r\n\r\n------------------ 原始邮件 ------------------\r\n发件人: \"Japin Li\" <japinli@hotmail.com&gt;;\r\n发送时间:&nbsp;2021年7月6日(星期二) 晚上7:04\r\n收件人:&nbsp;\"zwj\"<757634191@qq.com&gt;;\r\n抄送:&nbsp;\"pgsql-hackers\"<pgsql-hackers@lists.postgresql.org&gt;;\r\n主题:&nbsp;Re: Why is XLOG_FPI_FOR_HINT always need backups?\r\n\r\n\r\n\r\n\r\nOn Tue, 06 Jul 2021 at 17:58, zwj <757634191@qq.com&gt; wrote:\r\n&gt; Hi all,\r\n&gt;\r\n&gt; When I read the source code file src/backend/access/transam/xloginsert.c, I get something confused me.\r\n&gt; In the function XLogSaveBufferForHint, the flags are always REGBUF_FORCE_IMAGE which means it is always need backups.\r\n&gt; Is it right? 
Why do not check the full_page_writes?\r\n\r\nThe documentation [1] says:\r\n\r\n------------------------------------------------------------------------------\r\nwal_log_hints (boolean)\r\nWhen this parameter is on, the PostgreSQL server writes the entire content of\r\neach disk page to WAL during the first modification of that page after a\r\ncheckpoint, even for non-critical modifications of so-called hint bits.\r\n------------------------------------------------------------------------------\r\n\r\nDoes that mean whether the full_page_writes enable or not, if the wal_log_hints\r\nenabled, we always write the entire content of each disk page to WAL? If I'm\r\nright, should we mention this in wal_log_hints?\r\n\r\n[1] https://www.postgresql.org/docs/current/runtime-config-wal.html\r\n\r\n-- \r\nRegrads,\r\nJapin Li.\r\nChengDu WenWu Information Technology Co.,Ltd.\nThank you for reply. You are right and the PostgreSQL server writes the entire content of each disk page to WAL during the first modification of that page after acheckpoint while data checksum is on. But I wonder whether it is necessary or not while my file system can protect the blocks of database to be torn. And I read a comment in function MarkBufferDirtyHint:```/* * If we need to protect hint bit updates from torn writes, WAL-log a * full page image of the page. This full page image is only necessary * if the hint bit update is the first change to the page since the * last checkpoint. * * We don't check full_page_writes here because that logic is included * when we call XLogInsert() since the value changes dynamically. */```However, the code tell me it has nothing to do with full_page_writes. I can't figure it out. 
--Zhang Wenjie------------------ 原始邮件 ------------------发件人: \"Japin Li\" <japinli@hotmail.com>;发送时间: 2021年7月6日(星期二) 晚上7:04收件人: \"zwj\"<757634191@qq.com>;抄送: \"pgsql-hackers\"<pgsql-hackers@lists.postgresql.org>;主题: Re: Why is XLOG_FPI_FOR_HINT always need backups?On Tue, 06 Jul 2021 at 17:58, zwj <757634191@qq.com> wrote:> Hi all,>> When I read the source code file src/backend/access/transam/xloginsert.c, I get something confused me.> In the function XLogSaveBufferForHint, the flags are always REGBUF_FORCE_IMAGE which means it is always need backups.> Is it right? Why do not check the full_page_writes?The documentation [1] says:------------------------------------------------------------------------------wal_log_hints (boolean)When this parameter is on, the PostgreSQL server writes the entire content ofeach disk page to WAL during the first modification of that page after acheckpoint, even for non-critical modifications of so-called hint bits.------------------------------------------------------------------------------Does that mean whether the full_page_writes enable or not, if the wal_log_hintsenabled, we always write the entire content of each disk page to WAL? If I'mright, should we mention this in wal_log_hints?[1] https://www.postgresql.org/docs/current/runtime-config-wal.html-- Regrads,Japin Li.ChengDu WenWu Information Technology Co.,Ltd.", "msg_date": "Tue, 6 Jul 2021 20:42:23 +0800", "msg_from": "\"=?gb18030?B?endq?=\" <757634191@qq.com>", "msg_from_op": true, "msg_subject": "=?gb18030?B?u9i4tKO6IFdoeSBpcyBYTE9HX0ZQSV9GT1JfSElO?=\n =?gb18030?B?VCBhbHdheXMgbmVlZCBiYWNrdXBzPw==?=" }, { "msg_contents": "Hello.\n\nAt Tue, 6 Jul 2021 20:42:23 +0800, \"zwj\" <757634191@qq.com> wrote in \n> But I wonder whether it is necessary or not while my file system can protect the blocks of database to be torn. 
And I read a comment in function&nbsp;MarkBufferDirtyHint:\n> \n> /*\n> * If we need to protect hint bit updates from torn writes, WAL-log a\n> * full page image of the page. This full page image is only necessary\n> * if the hint bit update is the first change to the page since the\n> * last checkpoint.\n> *\n> * We don't check full_page_writes here because that logic is included\n> * when we call XLogInsert() since the value changes dynamically.\n> */\n> \n> However, the code tell me it has nothing to do with full_page_writes. I can't figure it out.\n\nThe doc of wal_log_hints says that \"*even* for non-critical\nmodifications of so-called hint bits\", which seems to me implies it is\nfollowing full_page_writes (and I think it is nonsense otherwise, as\nyou suspect).\n\nXLogSaveBufferForHint sets REGBUF_FORCE_IMAGE since 2c03216d83116 when\nthe symbol was introduced. As my understanding XLogInsert did not have\nan ability to enforce FPIs before the commit. The code comment above\nis older than that commit. So it seems to me a thinko that\nXLogSaveBufferForHint sets REGBUF_FORCE_IMAGE.\n\nI think the attached fixes that thinko.\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center", "msg_date": "Wed, 07 Jul 2021 16:11:25 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: =?utf-8?B?5Zue5aSN77ya?= Why is XLOG_FPI_FOR_HINT always need\n backups?" }, { "msg_contents": "\n\nOn 2021/07/07 16:11, Kyotaro Horiguchi wrote:\n> Hello.\n> \n> At Tue, 6 Jul 2021 20:42:23 +0800, \"zwj\" <757634191@qq.com> wrote in\n>> But I wonder whether it is necessary or not while my file system can protect the blocks of database to be torn. And I read a comment in function&nbsp;MarkBufferDirtyHint:\n>>\n>> /*\n>> * If we need to protect hint bit updates from torn writes, WAL-log a\n>> * full page image of the page. 
This full page image is only necessary\n>> * if the hint bit update is the first change to the page since the\n>> * last checkpoint.\n>> *\n>> * We don't check full_page_writes here because that logic is included\n>> * when we call XLogInsert() since the value changes dynamically.\n>> */\n>>\n>> However, the code tell me it has nothing to do with full_page_writes. I can't figure it out.\n> \n> The doc of wal_log_hints says that \"*even* for non-critical\n> modifications of so-called hint bits\", which seems to me implies it is\n> following full_page_writes (and I think it is nonsense otherwise, as\n> you suspect).\n> \n> XLogSaveBufferForHint sets REGBUF_FORCE_IMAGE since 2c03216d83116 when\n> the symbol was introduced. As my understanding XLogInsert did not have\n> an ability to enforce FPIs before the commit. The code comment above\n> is older than that commit. So it seems to me a thinko that\n> XLogSaveBufferForHint sets REGBUF_FORCE_IMAGE.\n> \n> I think the attached fixes that thinko.\n\nWith the patch, I got the following error during crash recovery.\nI guess this happened because XLOG_FPI_FOR_HINT record had\nno backup blocks even though the replay logic for XLOG_FPI_FOR_HINT\nassumes it contains backup blocks.\n\nFATAL: unexpected XLogReadBufferForRedo result when restoring backup block\nCONTEXT: WAL redo at 0/169C600 for XLOG/FPI_FOR_HINT: ; blkref #0: rel 1663/13236/16385, blk 0\n\nRegards,\n\n-- \nFujii Masao\nAdvanced Computing Technology Center\nResearch and Development Headquarters\nNTT DATA CORPORATION\n\n\n", "msg_date": "Thu, 15 Jul 2021 22:50:08 +0900", "msg_from": "Fujii Masao <masao.fujii@oss.nttdata.com>", "msg_from_op": false, "msg_subject": "=?UTF-8?B?UmU6IOWbnuWkje+8miBXaHkgaXMgWExPR19GUElfRk9SX0hJTlQgYWx3?=\n =?UTF-8?Q?ays_need_backups=3f?=" }, { "msg_contents": "At Thu, 15 Jul 2021 22:50:08 +0900, Fujii Masao <masao.fujii@oss.nttdata.com> wrote in \n> On 2021/07/07 16:11, Kyotaro Horiguchi wrote:\n> > The doc of wal_log_hints says that 
\"*even* for non-critical\n> > modifications of so-called hint bits\", which seems to me implies it is\n> > following full_page_writes (and I think it is nonsense otherwise, as\n> > you suspect).\n> > XLogSaveBufferForHint sets REGBUF_FORCE_IMAGE since 2c03216d83116 when\n> > the symbol was introduced. As my understanding XLogInsert did not have\n> > an ability to enforce FPIs before the commit. The code comment above\n> > is older than that commit. So it seems to me a thinko that\n> > XLogSaveBufferForHint sets REGBUF_FORCE_IMAGE.\n> > I think the attached fixes that thinko.\n> \n> With the patch, I got the following error during crash recovery.\n> I guess this happened because XLOG_FPI_FOR_HINT record had\n> no backup blocks even though the replay logic for XLOG_FPI_FOR_HINT\n> assumes it contains backup blocks.\n> \n> FATAL: unexpected XLogReadBufferForRedo result when restoring backup\n> block\n> CONTEXT: WAL redo at 0/169C600 for XLOG/FPI_FOR_HINT: ; blkref #0: rel\n> 1663/13236/16385, blk 0\n\nSorry, I missed that the XLogReadBufferForRedo is expected to return\nBLK_RESTORED. And XLogReadBufferForRedo errors out when it tries to\nread nonexistent page without having an FPI (this happens for FSM\npages). Rather than teaching XLogReadBufferExtended to behave\ndifferrently for the case, I choosed to avoid trying to load the page\nwhen the corresponding FPI block is missing in XLOG_FPI_FOR_HINT, as\nif the record itself did not exist at all.\n\nSince differently from XLOG_FPI, XLOG_FPI_FOR_HINT has only one page\nreference at most, but in the attached the decision whether to read\nthe page or not is made for each block.\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center", "msg_date": "Fri, 16 Jul 2021 16:31:30 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: =?utf-8?B?5Zue5aSN77ya?= Why is XLOG_FPI_FOR_HINT always need\n backups?" 
}, { "msg_contents": "\n\nOn 2021/07/16 16:31, Kyotaro Horiguchi wrote:\n> Sorry, I missed that the XLogReadBufferForRedo is expected to return\n> BLK_RESTORED. And XLogReadBufferForRedo errors out when it tries to\n> read nonexistent page without having an FPI (this happens for FSM\n> pages). Rather than teaching XLogReadBufferExtended to behave\n> differrently for the case, I choosed to avoid trying to load the page\n> when the corresponding FPI block is missing in XLOG_FPI_FOR_HINT, as\n> if the record itself did not exist at all.\n> \n> Since differently from XLOG_FPI, XLOG_FPI_FOR_HINT has only one page\n> reference at most, but in the attached the decision whether to read\n> the page or not is made for each block.\n\nThanks for updating the patch! It basically looks good to me.\n\n\t\t * Full-page image (FPI) records contain nothing else but a backup\n\t\t * block (or multiple backup blocks). Every block reference must\n\t\t * include a full-page image - otherwise there would be no point in\n\t\t * this record.\n\nThe above comment also needs to be updated?\n\nRegards,\n\n-- \nFujii Masao\nAdvanced Computing Technology Center\nResearch and Development Headquarters\nNTT DATA CORPORATION\n\n\n", "msg_date": "Sat, 17 Jul 2021 00:14:34 +0900", "msg_from": "Fujii Masao <masao.fujii@oss.nttdata.com>", "msg_from_op": false, "msg_subject": "=?UTF-8?B?UmU6IOWbnuWkje+8miBXaHkgaXMgWExPR19GUElfRk9SX0hJTlQgYWx3?=\n =?UTF-8?Q?ays_need_backups=3f?=" }, { "msg_contents": "At Sat, 17 Jul 2021 00:14:34 +0900, Fujii Masao <masao.fujii@oss.nttdata.com> wrote in \n> Thanks for updating the patch! It basically looks good to me.\n> \n> \t\t * Full-page image (FPI) records contain nothing else but a backup\n> \t\t * block (or multiple backup blocks). Every block reference must\n> \t\t * include a full-page image - otherwise there would be no point in\n> \t\t * this record.\n> \n> The above comment also needs to be updated?\n\nIn short, no. 
In contrast to the third paragraph, the first paragraph\nshould be thought that it is describing XLOG_FPI. However, actually\nit is not super obvious so it's better to make it clearer. Addition to\nthat, it seems to me (yes, to *me*) somewhat confused between \"block\nreference\", \"backup block\" and \"full-page image\". So I'd like to\nadjust the paragraph as the following.\n\n> * XLOG_FPI records contain nothing else but one or more block\n> * references. Every block reference must include a full-page image\n> * regardless of the full_page_writes setting - otherwise there would\n> * be no point in this record.\n\nFYI (or, for the record), the first paragraph got to the current shape\nby the commit 9155580fd5, where XLOG_FPI was modified to be able to\nhold more than one block references.\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center", "msg_date": "Mon, 19 Jul 2021 10:16:18 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: =?utf-8?B?5Zue5aSN77ya?= Why is XLOG_FPI_FOR_HINT always need\n backups?" }, { "msg_contents": "\n\nOn 2021/07/19 10:16, Kyotaro Horiguchi wrote:\n> At Sat, 17 Jul 2021 00:14:34 +0900, Fujii Masao <masao.fujii@oss.nttdata.com> wrote in\n>> Thanks for updating the patch! It basically looks good to me.\n>>\n>> \t\t * Full-page image (FPI) records contain nothing else but a backup\n>> \t\t * block (or multiple backup blocks). Every block reference must\n>> \t\t * include a full-page image - otherwise there would be no point in\n>> \t\t * this record.\n>>\n>> The above comment also needs to be updated?\n> \n> In short, no. In contrast to the third paragraph, the first paragraph\n> should be thought that it is describing XLOG_FPI. However, actually\n> it is not super obvious so it's better to make it clearer. Addition to\n> that, it seems to me (yes, to *me*) somewhat confused between \"block\n> reference\", \"backup block\" and \"full-page image\". 
So I'd like to\n> adjust the paragraph as the following.\n\nUnderstood. Thanks for updating the patch!\n\nI slightly modified the comments and pushed the patch. Thanks!\n\nRegards,\n\n-- \nFujii Masao\nAdvanced Computing Technology Center\nResearch and Development Headquarters\nNTT DATA CORPORATION\n\n\n", "msg_date": "Wed, 21 Jul 2021 11:23:20 +0900", "msg_from": "Fujii Masao <masao.fujii@oss.nttdata.com>", "msg_from_op": false, "msg_subject": "=?UTF-8?B?UmU6IOWbnuWkje+8miBXaHkgaXMgWExPR19GUElfRk9SX0hJTlQgYWx3?=\n =?UTF-8?Q?ays_need_backups=3f?=" }, { "msg_contents": "At Wed, 21 Jul 2021 11:23:20 +0900, Fujii Masao <masao.fujii@oss.nttdata.com> wrote in \n> I slightly modified the comments and pushed the patch. Thanks!\n\nThank you for commiting this!\n\nregards.\n\n-- \nKyotaro Horiguchi\nNTT Open Source Software Center\n\n\n", "msg_date": "Wed, 21 Jul 2021 17:03:53 +0900 (JST)", "msg_from": "Kyotaro Horiguchi <horikyota.ntt@gmail.com>", "msg_from_op": false, "msg_subject": "Re: =?utf-8?B?5Zue5aSN77ya?= Why is XLOG_FPI_FOR_HINT always need\n backups?" } ]
[ { "msg_contents": "Hi,\n\n\nI have noticed that postgres_fdw do not push down the CASE WHEN clauses. \nIn the following case this normal:\n\n contrib_regression=# EXPLAIN (ANALYZE, VERBOSE) SELECT (CASE WHEN\n mod(c1, 4) = 0 THEN 1 ELSE 2 END) FROM ft1;\n                                                    QUERY PLAN\n -----------------------------------------------------------------------------------------------------------------\n  Foreign Scan on public.ft1  (cost=100.00..146.00 rows=1000\n width=4) (actual time=0.306..0.844 rows=822 loops=1)\n    Output: CASE WHEN (mod(c1, 4) = 0) THEN 1 ELSE 2 END\n    Remote SQL: SELECT \"C 1\" FROM \"S 1\".\"T 1\"\n  Planning Time: 0.139 ms\n  Execution Time: 1.057 ms\n (5 rows)\n\n\nbut in these other cases this is a performances killer, all records are \nfetched\n\n\n contrib_regression=# EXPLAIN (ANALYZE, VERBOSE) SELECT sum(CASE WHEN\n mod(c1, 4) = 0 THEN 1 ELSE 2 END) FROM ft1;\n                                                       QUERY PLAN\n -----------------------------------------------------------------------------------------------------------------------\n  Aggregate  (cost=148.50..148.51 rows=1 width=8) (actual\n time=1.421..1.422 rows=1 loops=1)\n    Output: sum(CASE WHEN (mod(c1, 4) = 0) THEN 1 ELSE 2 END)\n    ->  Foreign Scan on public.ft1  (cost=100.00..141.00 rows=1000\n width=4) (actual time=0.694..1.366 rows=822 loops=1)\n          Output: c1\n          Remote SQL: SELECT \"C 1\" FROM \"S 1\".\"T 1\"\n  Planning Time: 1.531 ms\n  Execution Time: 3.901 ms\n (7 rows)\n\n\n contrib_regression=# EXPLAIN (ANALYZE, VERBOSE) SELECT * FROM ft1\n WHERE c1 > (CASE WHEN mod(c1, 4) = 0 THEN 1 ELSE 100 END);\n                                                    QUERY PLAN\n -----------------------------------------------------------------------------------------------------------------\n  Foreign Scan on public.ft1  (cost=100.00..148.48 rows=333\n width=47) (actual time=0.763..3.003 rows=762 loops=1)\n    Output: 
c1, c2, c3, c4, c5, c6, c7, c8\n    Filter: (ft1.c1 > CASE WHEN (mod(ft1.c1, 4) = 0) THEN 1 ELSE 100\n END)\n    Rows Removed by Filter: 60\n    Remote SQL: SELECT \"C 1\", c2, c3, c4, c5, c6, c7, c8 FROM \"S\n 1\".\"T 1\"\n  Planning Time: 0.584 ms\n  Execution Time: 3.392 ms\n (7 rows)\n\n\nThe attached patch adds push down of CASE WHEN clauses. Queries above \nhave the following plans when this patch is applied:\n\n\n contrib_regression=# EXPLAIN (ANALYZE, VERBOSE) SELECT sum(CASE WHEN\n mod(c1, 4) = 0 THEN 1 ELSE 2 END) FROM ft1;\n                                           QUERY PLAN\n ----------------------------------------------------------------------------------------------\n  Foreign Scan  (cost=107.50..128.53 rows=1 width=8) (actual\n time=2.022..2.024 rows=1 loops=1)\n    Output: (sum(CASE WHEN (mod(c1, 4) = 0) THEN 1 ELSE 2 END))\n    Relations: Aggregate on (public.ft1)\n    Remote SQL: SELECT sum(CASE  WHEN (mod(\"C 1\", 4) = 0) THEN 1\n ELSE 2 END) FROM \"S 1\".\"T 1\"\n  Planning Time: 0.252 ms\n  Execution Time: 2.684 ms\n (6 rows)\n\n contrib_regression=# EXPLAIN (ANALYZE, VERBOSE) SELECT * FROM ft1\n WHERE c1 > (CASE WHEN mod(c1, 4) = 0 THEN 1 ELSE 100 END);\n QUERY PLAN\n\n ------------------------------------------------------------------------------------------------------------------------\n ----------------------\n  Foreign Scan on public.ft1  (cost=100.00..135.16 rows=333\n width=47) (actual time=1.797..3.463 rows=762 loops=1)\n    Output: c1, c2, c3, c4, c5, c6, c7, c8\n    Remote SQL: SELECT \"C 1\", c2, c3, c4, c5, c6, c7, c8 FROM \"S\n 1\".\"T 1\" WHERE ((\"C 1\" > CASE  WHEN (mod(\"C 1\", 4) = 0)\n THEN 1 ELSE 100 END))\n  Planning Time: 0.745 ms\n  Execution Time: 3.860 ms\n (5 rows)\n\n\nI don't see a good reason to never push the CASE WHEN clause but perhaps \nI'm missing something, any though?\n\n\nBest regards,\n\n-- \nGilles Darold\nMigOps Inc (http://migops.com)", "msg_date": "Wed, 7 Jul 2021 00:18:31 +0200", "msg_from": 
"Gilles Darold <gilles@migops.com>", "msg_from_op": true, "msg_subject": "[PATCH][postgres_fdw] Add push down of CASE WHEN clauses" }, { "msg_contents": "On Wed, 7 Jul 2021 at 10:18, Gilles Darold <gilles@migops.com> wrote:\n> I have noticed that postgres_fdw do not push down the CASE WHEN clauses. In the following case this normal:\n\nThis looks very similar to [1] which is in the current commitfest.\n\nAre you able to look over that patch and check to ensure you're not\ndoing anything extra that the other patch isn't. If so, then likely\nthe best way to progress would be for you to test and review that\npatch.\n\nDavid\n\n[1] https://commitfest.postgresql.org/33/3171/\n\n\n", "msg_date": "Wed, 7 Jul 2021 16:59:18 +1200", "msg_from": "David Rowley <dgrowleyml@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PATCH][postgres_fdw] Add push down of CASE WHEN clauses" }, { "msg_contents": "Le 07/07/2021 à 06:59, David Rowley a écrit :\n> On Wed, 7 Jul 2021 at 10:18, Gilles Darold <gilles@migops.com> wrote:\n>> I have noticed that postgres_fdw do not push down the CASE WHEN clauses. In the following case this normal:\n> This looks very similar to [1] which is in the current commitfest.\n>\n> Are you able to look over that patch and check to ensure you're not\n> doing anything extra that the other patch isn't. If so, then likely\n> the best way to progress would be for you to test and review that\n> patch.\n>\n> David\n>\n> [1] https://commitfest.postgresql.org/33/3171/\n\n\nStrange I have searched the commitfest yesterday but without success,\nthis is clearly a duplicate. Anyway, thanks for the pointer and yes I\nwill review Alexander's patch as I know the subject now :-)\n\n\nBest regards\n\n-- \nGilles Darold\nMigOps Inc (https://migops.com/)\n\n\n\n", "msg_date": "Wed, 7 Jul 2021 07:42:08 +0200", "msg_from": "Gilles Darold <gilles@migops.com>", "msg_from_op": true, "msg_subject": "Re: [PATCH][postgres_fdw] Add push down of CASE WHEN clauses" } ]
[ { "msg_contents": "When reading the output of `initdb --help` I could not clearly\nunderstand what the purpose of the --sync-only option was, until I\nread the documentation of initdb.\n\n -S, --sync-only only sync data directory\n\nPerhaps the confusion was caused by the fact that sync(hronization)\nmeans different things in different contexts, and many of those\ncontexts apply to databases, and to data directories; time sync, data\nsync, replica sync, etc.\n\nI think it would be helpful if the help message was slightly more\ndescriptive. Some options:\n\nUsed in patch:\n only sync data directory; does not modify any data\n\nTo match the wording of --sync-only option:\n write contents of data directory to disk; helpful after --no-sync option\n\nClearly specify the system operation used for the option\n perform fsync on data directory; helpful after --no-sync option\n\nBest regards,\n--\nGurjeet Singh http://gurjeet.singh.im/", "msg_date": "Tue, 6 Jul 2021 19:01:07 -0700", "msg_from": "Gurjeet Singh <gurjeet@singh.im>", "msg_from_op": true, "msg_subject": "Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "On 7/6/21, 7:02 PM, \"Gurjeet Singh\" <gurjeet@singh.im> wrote:\r\n> I think it would be helpful if the help message was slightly more\r\n> descriptive. Some options:\r\n>\r\n> Used in patch:\r\n> only sync data directory; does not modify any data\r\n>\r\n> To match the wording of --sync-only option:\r\n> write contents of data directory to disk; helpful after --no-sync option\r\n>\r\n> Clearly specify the system operation used for the option\r\n> perform fsync on data directory; helpful after --no-sync option\r\n\r\nI think the help message should say exactly what the option does and\r\nshould avoid saying what it does not do or how it may be useful. 
I\r\nwould suggest the following to match the initdb docs [0]:\r\n\r\n -S, --sync-only safely write all database files to disk and exit\r\n\r\nIMO the note about the option being helpful after using the --no-sync\r\noption would fit better in the docs, but I'm struggling to think of a\r\nuse case for using --no-sync and then calling initdb again with\r\n--sync-only. Why wouldn't you just leave out --no-sync the first\r\ntime?\r\n\r\nNathan\r\n\r\n[0] https://www.postgresql.org/docs/devel/app-initdb.html\r\n\r\n", "msg_date": "Thu, 22 Jul 2021 22:32:18 +0000", "msg_from": "\"Bossart, Nathan\" <bossartn@amazon.com>", "msg_from_op": false, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "On Thu, Jul 22, 2021 at 10:32:18PM +0000, Bossart, Nathan wrote:\n> On 7/6/21, 7:02 PM, \"Gurjeet Singh\" <gurjeet@singh.im> wrote:\n> > I think it would be helpful if the help message was slightly more\n> > descriptive. Some options:\n> >\n> > Used in patch:\n> > only sync data directory; does not modify any data\n> >\n> > To match the wording of --sync-only option:\n> > write contents of data directory to disk; helpful after --no-sync option\n> >\n> > Clearly specify the system operation used for the option\n> > perform fsync on data directory; helpful after --no-sync option\n> \n> I think the help message should say exactly what the option does and\n> should avoid saying what it does not do or how it may be useful. I\n> would suggest the following to match the initdb docs [0]:\n> \n> -S, --sync-only safely write all database files to disk and exit\n> \n> IMO the note about the option being helpful after using the --no-sync\n> option would fit better in the docs, but I'm struggling to think of a\n> use case for using --no-sync and then calling initdb again with\n> --sync-only. 
Why wouldn't you just leave out --no-sync the first\n> time?\n\nIt's to allow safely running bulk loading with fsync=off - if the bulk load\nfails, you can wipe out the partially-loaded cluster and start over.\nBut then transitioning to a durable state requires not just setting fsync=on,\nwhich enables future fsync calls. It also requires syncing all dirty buffers.\n\ndoc/src/sgml/config.sgml- <para>\ndoc/src/sgml/config.sgml- For reliable recovery when changing <varname>fsync</varname>\ndoc/src/sgml/config.sgml- off to on, it is necessary to force all modified buffers in the\ndoc/src/sgml/config.sgml- kernel to durable storage. This can be done while the cluster\ndoc/src/sgml/config.sgml- is shutdown or while <varname>fsync</varname> is on by running <command>initdb\ndoc/src/sgml/config.sgml: --sync-only</command>, running <command>sync</command>, unmounting the\ndoc/src/sgml/config.sgml- file system, or rebooting the server.\ndoc/src/sgml/config.sgml- </para>\n\n-- \nJustin\n\n\n", "msg_date": "Thu, 22 Jul 2021 20:31:18 -0500", "msg_from": "Justin Pryzby <pryzby@telsasoft.com>", "msg_from_op": false, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "On 7/22/21, 6:31 PM, \"Justin Pryzby\" <pryzby@telsasoft.com> wrote:\r\n> On Thu, Jul 22, 2021 at 10:32:18PM +0000, Bossart, Nathan wrote:\r\n>> IMO the note about the option being helpful after using the --no-sync\r\n>> option would fit better in the docs, but I'm struggling to think of a\r\n>> use case for using --no-sync and then calling initdb again with\r\n>> --sync-only. Why wouldn't you just leave out --no-sync the first\r\n>> time?\r\n>\r\n> It's to allow safely running bulk loading with fsync=off - if the bulk load\r\n> fails, you can wipe out the partially-loaded cluster and start over.\r\n> But then transitioning to a durable state requires not just setting fsync=on,\r\n> which enables future fsync calls. 
It also requires syncing all dirty buffers.\r\n\r\nRight. Perhaps the documentation for --sync-only could mention this\r\nuse-case instead.\r\n\r\n Safely write all database files to disk and exit. This does\r\n not perform any of the normal initdb operations. Generally,\r\n this option is useful for ensuring reliable recovery after\r\n changing fsync from off to on.\r\n\r\nNathan\r\n\r\n", "msg_date": "Fri, 23 Jul 2021 16:08:51 +0000", "msg_from": "\"Bossart, Nathan\" <bossartn@amazon.com>", "msg_from_op": false, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "On 7/23/21, 9:09 AM, \"Bossart, Nathan\" <bossartn@amazon.com> wrote:\r\n> On 7/22/21, 6:31 PM, \"Justin Pryzby\" <pryzby@telsasoft.com> wrote:\r\n>> On Thu, Jul 22, 2021 at 10:32:18PM +0000, Bossart, Nathan wrote:\r\n>>> IMO the note about the option being helpful after using the --no-sync\r\n>>> option would fit better in the docs, but I'm struggling to think of a\r\n>>> use case for using --no-sync and then calling initdb again with\r\n>>> --sync-only. Why wouldn't you just leave out --no-sync the first\r\n>>> time?\r\n>>\r\n>> It's to allow safely running bulk loading with fsync=off - if the bulk load\r\n>> fails, you can wipe out the partially-loaded cluster and start over.\r\n>> But then transitioning to a durable state requires not just setting fsync=on,\r\n>> which enables future fsync calls. It also requires syncing all dirty buffers.\r\n>\r\n> Right. Perhaps the documentation for --sync-only could mention this\r\n> use-case instead.\r\n>\r\n> Safely write all database files to disk and exit. This does\r\n> not perform any of the normal initdb operations. 
Generally,\r\n> this option is useful for ensuring reliable recovery after\r\n> changing fsync from off to on.\r\n\r\nHere are my suggestions in patch form.\r\n\r\nNathan", "msg_date": "Mon, 26 Jul 2021 18:05:50 +0000", "msg_from": "\"Bossart, Nathan\" <bossartn@amazon.com>", "msg_from_op": false, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "On Mon, Jul 26, 2021 at 11:05 AM Bossart, Nathan <bossartn@amazon.com> wrote:\n> Here are my suggestions in patch form.\n\n+ printf(_(\" -S, --sync-only safely write all database\nfiles to disk and exit\\n\"));\n\nNot your patch's fault, but the word \"write\" does not seem to convey\nthe true intent of the option, because generally a \"write\" operation\nis still limited to dirtying the OS buffers, and does not guarantee\nsync-to-disk.\n\nIt'd be better if the help message said, either \"flush all database\nfiles to disk and exit\",or \"sync all database files to disk and exit\".\n\nBest regards,\n--\nGurjeet Singh http://gurjeet.singh.im/\n\n\n", "msg_date": "Wed, 28 Jul 2021 19:21:49 -0700", "msg_from": "Gurjeet Singh <gurjeet@singh.im>", "msg_from_op": true, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "> On 26 Jul 2021, at 20:05, Bossart, Nathan <bossartn@amazon.com> wrote:\n\n> Here are my suggestions in patch form.\n\n-\tprintf(_(\" -S, --sync-only only sync data directory\\n\"));\n+\tprintf(_(\" -S, --sync-only safely write all database files to disk and exit\\n\"));\n\nI think removing the word \"only\" here is a net loss, since it IMO clearly\nconveyed that this option isn't doing any of the other initdb duties. 
The\ndocumentation states it in more words, but the help output must be brief so I\nthink \"only\" is a good keyword.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Thu, 29 Jul 2021 11:10:25 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "On 7/29/21, 2:11 AM, \"Daniel Gustafsson\" <daniel@yesql.se> wrote:\r\n> I think removing the word \"only\" here is a net loss, since it IMO clearly\r\n> conveyed that this option isn't doing any of the other initdb duties. The\r\n> documentation states it in more words, but the help output must be brief so I\r\n> think \"only\" is a good keyword.\r\n\r\nI've attached a new version of the patch in which I've attempted to\r\naddress both sets of feedback.\r\n\r\nNathan", "msg_date": "Thu, 29 Jul 2021 23:37:56 +0000", "msg_from": "\"Bossart, Nathan\" <bossartn@amazon.com>", "msg_from_op": false, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "> On 30 Jul 2021, at 01:37, Bossart, Nathan <bossartn@amazon.com> wrote:\n> \n> On 7/29/21, 2:11 AM, \"Daniel Gustafsson\" <daniel@yesql.se> wrote:\n>> I think removing the word \"only\" here is a net loss, since it IMO clearly\n>> conveyed that this option isn't doing any of the other initdb duties. The\n>> documentation states it in more words, but the help output must be brief so I\n>> think \"only\" is a good keyword.\n> \n> I've attached a new version of the patch in which I've attempted to\n> address both sets of feedback.\n\nLGTM. I took the liberty to rephrase the \"and exit\" part of the initdb help\noutput match the other exiting options in there. 
Barring objections, I think\nthis is ready.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/", "msg_date": "Fri, 30 Jul 2021 11:21:50 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "On 7/30/21, 2:22 AM, \"Daniel Gustafsson\" <daniel@yesql.se> wrote:\r\n> LGTM. I took the liberty to rephrase the \"and exit\" part of the initdb help\r\n> output match the other exiting options in there. Barring objections, I think\r\n> this is ready.\r\n\r\nLGTM. Thanks!\r\n\r\nNathan\r\n\r\n", "msg_date": "Fri, 30 Jul 2021 16:27:18 +0000", "msg_from": "\"Bossart, Nathan\" <bossartn@amazon.com>", "msg_from_op": false, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "> On 30 Jul 2021, at 18:27, Bossart, Nathan <bossartn@amazon.com> wrote:\n> \n> On 7/30/21, 2:22 AM, \"Daniel Gustafsson\" <daniel@yesql.se> wrote:\n>> LGTM. I took the liberty to rephrase the \"and exit\" part of the initdb help\n>> output match the other exiting options in there. Barring objections, I think\n>> this is ready.\n> \n> LGTM. Thanks!\n\nPushed to master, thanks!\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Mon, 16 Aug 2021 13:42:01 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" }, { "msg_contents": "On Mon, Aug 16, 2021 at 4:42 AM Daniel Gustafsson <daniel@yesql.se> wrote:\n>\n> > On 30 Jul 2021, at 18:27, Bossart, Nathan <bossartn@amazon.com> wrote:\n> >\n> > On 7/30/21, 2:22 AM, \"Daniel Gustafsson\" <daniel@yesql.se> wrote:\n> >> LGTM. I took the liberty to rephrase the \"and exit\" part of the initdb help\n> >> output match the other exiting options in there. Barring objections, I think\n> >> this is ready.\n> >\n> > LGTM. 
Thanks!\n>\n> Pushed to master, thanks!\n\nThank you Daniel and Nathan! Much appreciated.\n\nBest regards,\n--\nGurjeet Singh http://gurjeet.singh.im/\n\n\n", "msg_date": "Mon, 16 Aug 2021 09:06:08 -0700", "msg_from": "Gurjeet Singh <gurjeet@singh.im>", "msg_from_op": true, "msg_subject": "Re: Slightly improve initdb --sync-only option's help message" } ]
[ { "msg_contents": "When reading through code for my previous patch [1] I realized that\ninitdb does *not* warn users that it ignores all other options (except\n-D/--pgdata) if the --sync-only option is used.\n\nI'm not able to come up with an exact situation to prove this, but\nthis behaviour seems potentially dangerous. The user might mix the\n--sync-only option with other options, but would be extremely\nsurprised if those other options didn't take effect.\n\nI _think_ we should throw an error if the user specifies any options\nthat are being ignored. But an error might break someone's automation\n(perhaps for their own good), since the current behaviour has been in\nplace for a very long time, so I'm willing to settle for at least a\nwarning in such a case.\n\n[1]:\nSlightly improve initdb --sync-only option's help message\nhttps://www.postgresql.org/message-id/CABwTF4U6hbNNE1bv%3DLxQdJybmUdZ5NJQ9rKY9tN82NXM8QH%2BiQ%40mail.gmail.com\n\nBest regards,\n--\nGurjeet Singh http://gurjeet.singh.im/", "msg_date": "Tue, 6 Jul 2021 19:23:54 -0700", "msg_from": "Gurjeet Singh <gurjeet@singh.im>", "msg_from_op": true, "msg_subject": "Warn if initdb's --sync-only option is mixed with other options" }, { "msg_contents": "> On 7 Jul 2021, at 04:23, Gurjeet Singh <gurjeet@singh.im> wrote:\n\n> I'm not able to come up with an exact situation to prove this, but\n> this behaviour seems potentially dangerous. The user might mix the\n> --sync-only option with other options, but would be extremely\n> surprised if those other options didn't take effect.\n\nIs if there is a plausible real world situation where a user runs --sync-only\ntogether with other arguments and also miss the fact that the other arguments\ndidn't take effect, and have bad consequences?\n\n> I _think_ we should throw an error if the user specifies any options\n> that are being ignored. 
But an error might break someone's automation\n> (perhaps for their own good), since the current behaviour has been in\n> place for a very long time, so I'm willing to settle for at least a\n> warning in such a case.\n\nWe typically don't issue warnings for incompatible arguments, but rather error\nout, and I'm not convinced this warrants breaking that. If we are going to do\nanything I think we should error out; if we decide to do something then we\nconsider the scripts that will break to already be broken.\n\nA slightly confusing aspect of this is however the error message for sync-only\nwhen -D or PGDATA isn't set says \"will reside\" when in fact it should say \"is\nresiding\" (or something along those lines):\n\n $ ./bin/initdb --sync-only\n initdb: error: no data directory specified\n You must identify the directory where the data for this database system\n will reside. Do this with either the invocation option -D or the\n environment variable PGDATA.\n\nI doubt it's worth complicating the code for this fringe case though.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Wed, 7 Jul 2021 15:25:13 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Warn if initdb's --sync-only option is mixed with other options" }, { "msg_contents": "> On 7 Jul 2021, at 15:25, Daniel Gustafsson <daniel@yesql.se> wrote:\n\n> I doubt it's worth complicating the code for this fringe case though.\n\nThis thread has stalled, and with the updated docs/help output done for this\noption I don't think this is worth pursuing (especially given the lack of\ncomplaints over behavior which has existed for a very long time). 
I'm marking\nthis returned with feedback.\n\n--\nDaniel Gustafsson\t\thttps://vmware.com/\n\n\n\n", "msg_date": "Fri, 1 Oct 2021 13:22:04 +0200", "msg_from": "Daniel Gustafsson <daniel@yesql.se>", "msg_from_op": false, "msg_subject": "Re: Warn if initdb's --sync-only option is mixed with other options" } ]
[ { "msg_contents": "Hi \r\n\r\nWhen I used COPY FROM command on windows, I found that If the line data ends with a backslash and carriage return/newlines(\\r\\n),COPY FROM mishandle the line .\r\nAs a result, there were unexpected data loaded into database.\r\n\r\nThe following case can reproduce this issue.\r\n----------------------------------------------------------------------------\r\nData file:\r\nlines ending with carriage return/newlines(\\r\\n)\r\n ----- test.txt ------\r\n AAA\\ ★there is only one Backslash characters (\\) in the line end.\r\n BBB\r\n -------------------\r\n\r\nData loading:\r\n#CREATE TABLE copytest( a TEXT);\r\n#COPY copytest FROM '/test.txt';\r\n\r\nData in database:\r\n# SELECT * FROM copytest;\r\n a\r\n-------\r\n aaa\\r ★\\r is loaded unexpectedly\r\n bbb\r\n(2 rows)\r\n--------------------------------------------------------------------------\r\n\r\nIn this case , is it better to throw an error to user than to load the unexpected data to database?\r\n\r\nRegards,\r\n", "msg_date": "Wed, 7 Jul 2021 07:07:43 +0000", "msg_from": "\"jianggq@fujitsu.com\" <jianggq@fujitsu.com>", "msg_from_op": true, "msg_subject": "unexpected data loaded into database when used COPY FROM" }, { "msg_contents": "\"jianggq@fujitsu.com\" <jianggq@fujitsu.com> writes:\n> When I used COPY FROM command on windows, I found that If the line data ends with a backslash and carriage return/newlines(\\r\\n),COPY FROM mishandle the line .\n> As a result, there were unexpected data loaded into database.\n\nIf what you're saying is that backslash-\\r-\\n results in the \\r being\ntaken as a data character, there is exactly nothing unexpected about that.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 07 Jul 2021 11:34:06 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: unexpected data loaded into database when used COPY FROM" } ]
[ { "msg_contents": "Hi all,\n\nIndex vacuuming is one of the most time-consuming processes in lazy\nvacuuming. lazy_tid_reaped() is a large part among them. The attached\nthe flame graph shows a profile of a vacuum on a table that has one index\nand 80 million live rows and 20 million dead rows, where\nlazy_tid_reaped() accounts for about 47% of the total vacuum execution\ntime.\n\nlazy_tid_reaped() is essentially an existence check; for every index\ntuple, it checks if the TID of the heap it points to exists in the set\nof TIDs of dead tuples. The maximum size of dead tuples is limited by\nmaintenance_work_mem, and if the upper limit is reached, the heap scan\nis suspended, index vacuum and heap vacuum are performed, and then\nheap scan is resumed again. Therefore, in terms of the performance of\nindex vacuuming, there are two important factors: the performance of\nlookup TIDs from the set of dead tuples and its memory usage. The\nformer is obvious whereas the latter affects the number of Index\nvacuuming. In many index AMs, index vacuuming (i.e., ambulkdelete)\nperforms a full scan of the index, so it is important in terms of\nperformance to avoid index vacuuming from being executed more than\nonce during lazy vacuum.\n\nCurrently, the TIDs of dead tuples are stored in an array that is\ncollectively allocated at the start of lazy vacuum and TID lookup uses\nbsearch(). There are the following challenges and limitations:\n\n1. Don't allocate more than 1GB. There was a discussion to eliminate\nthis limitation by using MemoryContextAllocHuge() but there were\nconcerns about point 2[1].\n\n2. Allocate the whole memory space at once.\n\n3. Slow lookup performance (O(logN)).\n\nI’ve done some experiments in this area and would like to share the\nresults and discuss ideas.\n\nProblems Solutions\n===============\n\nFirstly, I've considered using existing data structures:\nIntegerSet(src/backend/lib/integerset.c) and\nTIDBitmap(src/backend/nodes/tidbitmap.c). 
Those address point 1 but\nonly either point 2 or 3. IntegerSet uses lower memory thanks to\nsimple-8b encoding but is slow at lookup, still O(logN), since it’s a\ntree structure. On the other hand, TIDBitmap has a good lookup\nperformance, O(1), but could unnecessarily use larger memory in some\ncases since it always allocates the space for bitmap enough to store\nall possible offsets. With 8kB blocks, the maximum number of line\npointers in a heap page is 291 (c.f., MaxHeapTuplesPerPage) so the\nbitmap is 40 bytes long and we always need 46 bytes in total per block\nincluding other meta information.\n\nSo I prototyped a new data structure dedicated to storing dead tuples\nduring lazy vacuum while borrowing the idea from Roaring Bitmap[2].\nThe authors provide an implementation of Roaring Bitmap[3] (Apache\n2.0 license). But I've implemented this idea from scratch because we\nneed to integrate it with Dynamic Shared Memory/Area to support\nparallel vacuum and need to support ItemPointerData, 6-bytes integer\nin total, whereas the implementation supports only 4-bytes integers.\nAlso, when it comes to vacuum, we neither need to compute the\nintersection, the union, nor the difference between sets, but need\nonly an existence check.\n\nThe data structure is somewhat similar to TIDBitmap. It consists of\nthe hash table and the container area; the hash table has entries per\nblock and each block entry allocates its memory space, called a\ncontainer, in the container area to store its offset numbers. The\ncontainer area is actually an array of bytes and can be enlarged as\nneeded. In the container area, the data representation of offset\nnumbers varies depending on their cardinality. It has three container\ntypes: array, bitmap, and run.\n\nFor example, if there are two dead tuples at offset 1 and 150, it uses\nthe array container that has an array of two 2-byte integers\nrepresenting 1 and 150, using 4 bytes in total. 
If we used the bitmap\ncontainer in this case, we would need 20 bytes instead. On the other\nhand, if there are consecutive 20 dead tuples from offset 1 to 20, it\nuses the run container that has an array of 2-byte integers. The first\nvalue in each pair represents a starting offset number, whereas the\nsecond value represents its length. Therefore, in this case, the run\ncontainer uses only 4 bytes in total. Finally, if there are dead\ntuples at every other offset from 1 to 100, it uses the bitmap\ncontainer that has an uncompressed bitmap, using 13 bytes. We need\nanother 16 bytes per block entry for hash table entry.\n\nThe lookup complexity of a bitmap container is O(1) whereas the one of\nan array and a run container is O(N) or O(logN) but the number of\nelements in those two containers should not be large it would not be a\nproblem.\n\nEvaluation\n========\n\nBefore implementing this idea and integrating it with lazy vacuum\ncode, I've implemented a benchmark tool dedicated to evaluating\nlazy_tid_reaped() performance[4]. It has some functions: generating\nTIDs for both index tuples and dead tuples, loading dead tuples to the\ndata structure, simulating lazy_tid_reaped() using those virtual heap\ntuples and heap dead tuples. So the code lacks many features such as\niteration and DSM/DSA support but it makes testing of data structure\neasier.\n\nFYI I've confirmed the validity of this tool. When I ran a vacuum on\nthe table with 3GB size, index vacuuming took 12.3 sec and\nlazy_tid_reaped() took approximately 8.5 sec. Simulating a similar\nsituation with the tool, the lookup benchmark with the array data\nstructure took approximately 8.0 sec. 
Given that the tool doesn't\nsimulate the cost of function calls, it seems to reasonably simulate\nit.\n\nI've evaluated the lookup performance and memory foot point against\nthe four types of data structure: array, integerset (intset),\ntidbitmap (tbm), roaring tidbitmap (rtbm) while changing the\ndistribution of dead tuples in blocks. Since tbm doesn't have a\nfunction for existence check I've added it and allocate enough memory\nto make sure that tbm never be lossy during the evaluation. In all\ntest cases, I simulated that the table has 1,000,000 blocks and every\nblock has at least one dead tuple. The benchmark scenario is that for\neach virtual heap tuple we check if there is its TID in the dead\ntuple storage. Here are the results of execution time in milliseconds\nand memory usage in bytes:\n\n* Test-case 1 (10 dead tuples in 20 offsets interval)\n\nAn array container is selected in this test case, using 20 bytes for each block.\n\n Execution Time Memory Usage\narray 14,140.91 60,008,248\nintset 9,350.08 50,339,840\ntbm 1,299.62 100,671,544\nrtbm 1,892.52 58,744,944\n\n* Test-case 2 (10 consecutive dead tuples from offset 1)\n\nA bitmap container is selected in this test case, using 2 bytes for each block.\n\n Execution Time Memory Usage\narray 1,056.60 60,008,248\nintset 650.85 50,339,840\ntbm 194.61 100,671,544\nrtbm 154.57 27,287,664\n\n* Test-case 3 (2 dead tuples at 1 and 100 offsets)\n\nAn array container is selected in this test case, using 4 bytes for\neach block. 
Since 'array' data structure (not array container of rtbm)\nuses only 12 bytes for each block, given that the size of hash table\nentry size in 'rtbm', 'array' data structure uses less memory.\n\n Execution Time Memory Usage\narray 6,054.22 12,008,248\nintset 4,203.41 16,785,408\ntbm 759.17 100,671,544\nrtbm 750.08 29,384,816\n\n* Test-case 4 (100 consecutive dead tuples from 1)\n\nA run container is selected in this test case, using 4 bytes for each block.\n\n Execution Time Memory Usage\narray 8,883.03 600,008,248\nintset 7,358.23 100,671,488\ntbm 758.81 100,671,544\nrtbm 764.33 29,384,816\n\nOverall, 'rtbm' has a much better lookup performance and good memory\nusage especially if there are relatively many dead tuples. However, in\nsome cases, 'intset' and 'array' have a better memory usage.\n\nFeedback is very welcome. Thank you for reading the email through to the end.\n\nRegards,\n\n[1] https://www.postgresql.org/message-id/CAGTBQpbDCaR6vv9%3DscXzuT8fSbckf%3Da3NgZdWFWZbdVugVht6Q%40mail.gmail.com\n[2] http://roaringbitmap.org/\n[3] https://github.com/RoaringBitmap/CRoaring\n[4] https://github.com/MasahikoSawada/pgtools/tree/master/bdbench\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/", "msg_date": "Wed, 7 Jul 2021 20:46:38 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "[PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, 7 Jul 2021 at 13:47, Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> Hi all,\n>\n> Index vacuuming is one of the most time-consuming processes in lazy\n> vacuuming. lazy_tid_reaped() is a large part among them. 
The attached\n> the flame graph shows a profile of a vacuum on a table that has one index\n> and 80 million live rows and 20 million dead rows, where\n> lazy_tid_reaped() accounts for about 47% of the total vacuum execution\n> time.\n>\n> [...]\n>\n> Overall, 'rtbm' has a much better lookup performance and good memory\n> usage especially if there are relatively many dead tuples. However, in\n> some cases, 'intset' and 'array' have a better memory usage.\n\nThose are some great results, with a good path to meaningful improvements.\n\n> Feedback is very welcome. Thank you for reading the email through to the end.\n\nThe current available infrastructure for TIDs is quite ill-defined for\nTableAM authors [0], and other TableAMs might want to use more than\njust the 11 bits in use by max-BLCKSZ HeapAM MaxHeapTuplesPerPage to\nidentify tuples. (MaxHeapTuplesPerPage is 1169 at the maximum 32k\nBLCKSZ, which requires 11 bits to fit).\n\nCould you also check what the (performance, memory) impact would be if\nthese proposed structures were to support the maximum\nMaxHeapTuplesPerPage of 1169 or the full uint16-range of offset\nnumbers that could be supported by our current TID struct?\n\nKind regards,\n\nMatthias van de Meent\n\n[0] https://www.postgresql.org/message-id/flat/0bbeb784050503036344e1f08513f13b2083244b.camel%40j-davis.com\n\n\n", "msg_date": "Wed, 7 Jul 2021 16:25:28 +0200", "msg_from": "Matthias van de Meent <boekewurm+postgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jul 7, 2021 at 4:47 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> Currently, the TIDs of dead tuples are stored in an array that is\n> collectively allocated at the start of lazy vacuum and TID lookup uses\n> bsearch(). There are the following challenges and limitations:\n>\n> 1. Don't allocate more than 1GB. 
There was a discussion to eliminate\n> this limitation by using MemoryContextAllocHuge() but there were\n> concerns about point 2[1].\n\nI think that the main problem with the 1GB limitation is that it is\nsurprising -- it can cause disruption when we first exceed the magical\nlimit of ~174 million TIDs. This can cause us to dirty index pages a\nsecond time when we might have been able to just do it once with\nsufficient memory for TIDs. OTOH there are actually cases where having\nless memory for TIDs makes performance *better* because of locality\neffects. This perverse behavior with memory sizing isn't a rare case\nthat we can safely ignore -- unfortunately it's fairly common.\n\nMy point is that we should be careful to choose the correct goal.\nObviously memory use matters. But it might be more helpful to think of\nmemory use as just a proxy for what truly matters, not a goal in\nitself. It's hard to know what this means (what is the \"real goal\"?),\nand hard to measure it even if you know for sure. It could still be\nuseful to think of it like this.\n\n> A run container is selected in this test case, using 4 bytes for each block.\n>\n> Execution Time Memory Usage\n> array 8,883.03 600,008,248\n> intset 7,358.23 100,671,488\n> tbm 758.81 100,671,544\n> rtbm 764.33 29,384,816\n>\n> Overall, 'rtbm' has a much better lookup performance and good memory\n> usage especially if there are relatively many dead tuples. However, in\n> some cases, 'intset' and 'array' have a better memory usage.\n\nThis seems very promising.\n\nI wonder how much you have thought about the index AM side. It makes\nsense to initially evaluate these techniques using this approach of\nseparating the data structure from how it is used by VACUUM -- I think\nthat that was a good idea. But at the same time there may be certain\nimportant theoretical questions that cannot be answered this way --\nquestions about how everything \"fits together\" in a real VACUUM might\nmatter a lot. 
You've probably thought about this at least a little\nalready. Curious to hear how you think it \"fits together\" with the\nwork that you've done already.\n\nThe loop inside btvacuumpage() makes each loop iteration call the\ncallback -- this is always a call to lazy_tid_reaped() in practice.\nAnd that's where we do binary searches. These binary searches are\nusually where we see a huge number of cycles spent when we look at\nprofiles, including the profile that produced your flame graph. But I\nworry that that might be a bit misleading -- the way that profilers\nattribute costs is very complicated and can never be fully trusted.\nWhile it is true that lazy_tid_reaped() often accesses main memory,\nwhich will of course add a huge amount of latency and make it a huge\nbottleneck, the \"big picture\" is still relevant.\n\nI think that the compiler currently has to make very conservative\nassumptions when generating the machine code used by the loop inside\nbtvacuumpage(), which calls through an opaque function pointer at\nleast once per loop iteration -- anything can alias, so the compiler\nmust be conservative. The data dependencies are hard for both the\ncompiler and the CPU to analyze. The cost of using a function pointer\ncompared to a direct function call is usually quite low, but there are\nimportant exceptions -- cases where it prevents other useful\noptimizations. Maybe this is an exception.\n\nI wonder how much it would help to break up that loop into two loops.\nMake the callback into a batch operation that generates state that\ndescribes what to do with each and every index tuple on the leaf page.\nThe first loop would build a list of TIDs, then you'd call into\nvacuumlazy.c and get it to process the TIDs, and finally the second\nloop would physically delete the TIDs that need to be deleted. This\nwould mean that there would be only one call per leaf page per\nbtbulkdelete(). 
This would reduce the number of calls to the callback\nby at least 100x, and maybe more than 1000x.\n\nThis approach would make btbulkdelete() similar to\n_bt_simpledel_pass() + _bt_delitems_delete_check(). This is not really\nan independent idea to your ideas -- I imagine that this would work\nfar better when combined with a more compact data structure, which is\nnaturally more capable of batch processing than a simple array of\nTIDs. Maybe this will help the compiler and the CPU to fully\nunderstand the *natural* data dependencies, so that they can be as\neffective as possible in making the code run fast. It's possible that\na modern CPU will be able to *hide* the latency more intelligently\nthan what we have today. The latency is such a big problem that we may\nbe able to justify \"wasting\" other CPU resources, just because it\nsometimes helps with hiding the latency. For example, it might\nactually be okay to sort all of the TIDs on the page to make the bulk\nprocessing work -- though you might still do a precheck that is\nsimilar to the precheck inside lazy_tid_reaped() that was added by you\nin commit bbaf315309e.\n\nOf course it's very easy to be wrong about stuff like this. But it\nmight not be that hard to prototype. You can literally copy and paste\ncode from _bt_delitems_delete_check() to do this. 
It does the same\nbasic thing already.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Wed, 7 Jul 2021 13:24:06 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jul 7, 2021 at 1:24 PM Peter Geoghegan <pg@bowt.ie> wrote:\n> I wonder how much it would help to break up that loop into two loops.\n> Make the callback into a batch operation that generates state that\n> describes what to do with each and every index tuple on the leaf page.\n> The first loop would build a list of TIDs, then you'd call into\n> vacuumlazy.c and get it to process the TIDs, and finally the second\n> loop would physically delete the TIDs that need to be deleted. This\n> would mean that there would be only one call per leaf page per\n> btbulkdelete(). This would reduce the number of calls to the callback\n> by at least 100x, and maybe more than 1000x.\n\nMaybe for something like rtbm.c (which is inspired by Roaring\nbitmaps), you would really want to use an \"intersection\" operation for\nthis. The TIDs that we need to physically delete from the leaf page\ninside btvacuumpage() are the intersection of two bitmaps: our bitmap\nof all TIDs on the leaf page, and our bitmap of all TIDs that need to\nbe deleting by the ongoing btbulkdelete() call.\n\nObviously the typical case is that most TIDs in the index do *not* get\ndeleted -- needing to delete more than ~20% of all TIDs in the index\nwill be rare. Ideally it would be very cheap to figure out that a TID\ndoes not need to be deleted at all. Something a little like a negative\ncache (but not a true negative cache). 
This is a little bit like how\nhash joins can be made faster by adding a Bloom filter -- most hash\nprobes don't need to join a tuple in the real world, and we can make\nthese hash probes even faster by using a Bloom filter as a negative\ncache.\n\nIf you had the list of TIDs from a leaf page sorted for batch\nprocessing, and if you had roaring bitmap style \"chunks\" with\n\"container\" metadata stored in the data structure, you could then use\nmerging/intersection -- that has some of the same advantages. I think\nthat this would be a lot more efficient than having one binary search\nper TID. Most TIDs from the leaf page can be skipped over very\nquickly, in large groups. It's very rare for VACUUM to need to delete\nTIDs from completely random heap table blocks in the real world (some\nkind of pattern is much more common).\n\nWhen this merging process finds 1 TID that might really be deletable\nthen it's probably going to find much more than 1 -- better to make\nthat cache miss take care of all of the TIDs together. Also seems like\nthe CPU could do some clever prefetching with this approach -- it\ncould prefetch TIDs where the initial chunk metadata is insufficient\nto eliminate them early -- these are the groups of TIDs that will have\nmany TIDs that we actually need to delete. ISTM that improving\ntemporal locality through batching could matter a lot here.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Wed, 7 Jul 2021 15:50:48 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jul 7, 2021 at 11:25 PM Matthias van de Meent\n<boekewurm+postgres@gmail.com> wrote:\n>\n> On Wed, 7 Jul 2021 at 13:47, Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > Hi all,\n> >\n> > Index vacuuming is one of the most time-consuming processes in lazy\n> > vacuuming. lazy_tid_reaped() is a large part among them. 
The attached\n> > the flame graph shows a profile of a vacuum on a table that has one index\n> > and 80 million live rows and 20 million dead rows, where\n> > lazy_tid_reaped() accounts for about 47% of the total vacuum execution\n> > time.\n> >\n> > [...]\n> >\n> > Overall, 'rtbm' has a much better lookup performance and good memory\n> > usage especially if there are relatively many dead tuples. However, in\n> > some cases, 'intset' and 'array' have a better memory usage.\n>\n> Those are some great results, with a good path to meaningful improvements.\n>\n> > Feedback is very welcome. Thank you for reading the email through to the end.\n>\n> The current available infrastructure for TIDs is quite ill-defined for\n> TableAM authors [0], and other TableAMs might want to use more than\n> just the 11 bits in use by max-BLCKSZ HeapAM MaxHeapTuplesPerPage to\n> identify tuples. (MaxHeapTuplesPerPage is 1169 at the maximum 32k\n> BLCKSZ, which requires 11 bits to fit).\n>\n> Could you also check what the (performance, memory) impact would be if\n> these proposed structures were to support the maximum\n> MaxHeapTuplesPerPage of 1169 or the full uint16-range of offset\n> numbers that could be supported by our current TID struct?\n\nI think tbm will be the most affected by the memory impact of the\nlarger maximum MaxHeapTuplesPerPage. For example, with 32kB blocks\n(MaxHeapTuplesPerPage = 1169), even if there is only one dead tuple in\na block, it will always require at least 147 bytes per block.\n\nRtbm chooses the container type among array, bitmap, or run depending\non the number and distribution of dead tuples in a block, and only\nbitmap containers can be searched with O(1). Run containers depend on\nthe distribution of dead tuples within a block. So let’s compare array\nand bitmap containers.\n\nWith 8kB blocks (MaxHeapTuplesPerPage = 291), 36 bytes are needed for\na bitmap container at maximum. 
In other words, when compared to an\narray container, bitmap will be chosen if there are more than 18 dead\ntuples in a block. On the other hand, with 32kB blocks\n(MaxHeapTuplesPerPage = 1169), 147 bytes are needed for a bitmap\ncontainer at maximum, so bitmap container will be chosen if there are\nmore than 74 dead tuples in a block. And, with full uint16-range\n(MaxHeapTuplesPerPage = 65535), 8192 bytes are needed at maximum, so\nbitmap container will be chosen if there are more than 4096 dead\ntuples in a block. Therefore, in any case, if more than about 6% of\ntuples in a block are garbage, a bitmap container will be chosen and\nbring a faster lookup performance. (Of course, if a run container is\nchosen, the container size gets smaller but the lookup performance is\nO(logN).) But if the number of dead tuples in the table is small and\nwe have the larger MaxHeapTuplesPerPage, it’s likely to choose an\narray container, and the lookup performance becomes O(logN). Still, it\nshould be faster than the array data structure because the range of\nsearch targets in an array container is much smaller.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Thu, 8 Jul 2021 14:30:59 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 8, 2021 at 5:24 AM Peter Geoghegan <pg@bowt.ie> wrote:\n>\n> On Wed, Jul 7, 2021 at 4:47 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > Currently, the TIDs of dead tuples are stored in an array that is\n> > collectively allocated at the start of lazy vacuum and TID lookup uses\n> > bsearch(). There are the following challenges and limitations:\n> >\n> > 1. Don't allocate more than 1GB. 
There was a discussion to eliminate\n> > this limitation by using MemoryContextAllocHuge() but there were\n> > concerns about point 2[1].\n>\n> I think that the main problem with the 1GB limitation is that it is\n> surprising -- it can cause disruption when we first exceed the magical\n> limit of ~174 million TIDs. This can cause us to dirty index pages a\n> second time when we might have been able to just do it once with\n> sufficient memory for TIDs. OTOH there are actually cases where having\n> less memory for TIDs makes performance *better* because of locality\n> effects. This perverse behavior with memory sizing isn't a rare case\n> that we can safely ignore -- unfortunately it's fairly common.\n>\n> My point is that we should be careful to choose the correct goal.\n> Obviously memory use matters. But it might be more helpful to think of\n> memory use as just a proxy for what truly matters, not a goal in\n> itself. It's hard to know what this means (what is the \"real goal\"?),\n> and hard to measure it even if you know for sure. It could still be\n> useful to think of it like this.\n\nAs I wrote in the first email, I think there are two important factors\nin index vacuuming performance: the performance to check if heap TID\nthat an index tuple points to is dead, and the number of times to\nperform index bulk-deletion. The flame graph I attached in the first\nmail shows CPU spent much time on lazy_tid_reaped() but vacuum is a\ndisk-intensive operation in practice. Given that most index AM's\nbulk-deletion does a full index scan and a table could have multiple\nindexes, reducing the number of times to perform index bulk-deletion\nreally contributes to reducing the execution time, especially for\nlarge tables. 
I think that a more compact data structure for dead\ntuple TIDs is one of the ways to achieve that.\n\n>\n> > A run container is selected in this test case, using 4 bytes for each block.\n> >\n> > Execution Time Memory Usage\n> > array 8,883.03 600,008,248\n> > intset 7,358.23 100,671,488\n> > tbm 758.81 100,671,544\n> > rtbm 764.33 29,384,816\n> >\n> > Overall, 'rtbm' has a much better lookup performance and good memory\n> > usage especially if there are relatively many dead tuples. However, in\n> > some cases, 'intset' and 'array' have a better memory usage.\n>\n> This seems very promising.\n>\n> I wonder how much you have thought about the index AM side. It makes\n> sense to initially evaluate these techniques using this approach of\n> separating the data structure from how it is used by VACUUM -- I think\n> that that was a good idea. But at the same time there may be certain\n> important theoretical questions that cannot be answered this way --\n> questions about how everything \"fits together\" in a real VACUUM might\n> matter a lot. You've probably thought about this at least a little\n> already. Curious to hear how you think it \"fits together\" with the\n> work that you've done already.\n\nYeah, that definitely needs to be considered. Currently, what we need\nfor the dead tuple storage for lazy vacuum are store, lookup, and\niteration. And given the parallel vacuum, it has to be able to be\nallocated on DSM or DSA. While implementing the PoC code, I'm trying\nto integrate it with the current lazy vacuum code. As far as I've seen\nso far, the integration is not hard, at least with the *current* lazy\nvacuum code and index AMs code.\n\n>\n> The loop inside btvacuumpage() makes each loop iteration call the\n> callback -- this is always a call to lazy_tid_reaped() in practice.\n> And that's where we do binary searches. 
These binary searches are\n> usually where we see a huge number of cycles spent when we look at\n> profiles, including the profile that produced your flame graph. But I\n> worry that that might be a bit misleading -- the way that profilers\n> attribute costs is very complicated and can never be fully trusted.\n> While it is true that lazy_tid_reaped() often accesses main memory,\n> which will of course add a huge amount of latency and make it a huge\n> bottleneck, the \"big picture\" is still relevant.\n>\n> I think that the compiler currently has to make very conservative\n> assumptions when generating the machine code used by the loop inside\n> btvacuumpage(), which calls through an opaque function pointer at\n> least once per loop iteration -- anything can alias, so the compiler\n> must be conservative. The data dependencies are hard for both the\n> compiler and the CPU to analyze. The cost of using a function pointer\n> compared to a direct function call is usually quite low, but there are\n> important exceptions -- cases where it prevents other useful\n> optimizations. Maybe this is an exception.\n>\n> I wonder how much it would help to break up that loop into two loops.\n> Make the callback into a batch operation that generates state that\n> describes what to do with each and every index tuple on the leaf page.\n> The first loop would build a list of TIDs, then you'd call into\n> vacuumlazy.c and get it to process the TIDs, and finally the second\n> loop would physically delete the TIDs that need to be deleted. This\n> would mean that there would be only one call per leaf page per\n> btbulkdelete(). This would reduce the number of calls to the callback\n> by at least 100x, and maybe more than 1000x.\n>\n> This approach would make btbulkdelete() similar to\n> _bt_simpledel_pass() + _bt_delitems_delete_check(). 
This is not really\n> an independent idea to your ideas -- I imagine that this would work\n> far better when combined with a more compact data structure, which is\n> naturally more capable of batch processing than a simple array of\n> TIDs. Maybe this will help the compiler and the CPU to fully\n> understand the *natural* data dependencies, so that they can be as\n> effective as possible in making the code run fast. It's possible that\n> a modern CPU will be able to *hide* the latency more intelligently\n> than what we have today. The latency is such a big problem that we may\n> be able to justify \"wasting\" other CPU resources, just because it\n> sometimes helps with hiding the latency. For example, it might\n> actually be okay to sort all of the TIDs on the page to make the bulk\n> processing work -- though you might still do a precheck that is\n> similar to the precheck inside lazy_tid_reaped() that was added by you\n> in commit bbaf315309e.\n\nInteresting idea. I remember you mentioned this idea somewhere and\nI've considered this idea too while implementing the PoC code. It's\ndefinitely worth trying. Maybe we can write a patch for this as a\nseparate patch? It will change index AM and could improve also the\ncurrent bulk-deletion. 
We can consider a better data structure on top\nof this idea.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Thu, 8 Jul 2021 17:47:11 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Very nice results.\n\nI have been working on the same problem but a bit different solution -\na mix of binary search for (sub)pages and 32-bit bitmaps for\ntid-in-page.\n\nEven with currebnt allocation heuristics (allocate 291 tids per page)\nit initially allocate much less space, instead of current 291*6=1746\nbytes per page it needs to allocate 80 bytes.\n\nAlso it can be laid out so that it is friendly to parallel SIMD\nsearches doing up to 8 tid lookups in parallel.\n\nThat said, for allocating the tid array, the best solution is to\npostpone it as much as possible and to do the initial collection into\na file, which\n\n1) postpones the memory allocation to the beginning of index cleanups\n\n2) lets you select the correct size and structure as you know more\nabout the distribution at that time\n\n3) do the first heap pass in one go and then advance frozenxmin\n*before* index cleanup\n\nAlso, collecting dead tids into a file makes it trivial (well, almost\n:) ) to parallelize the initial heap scan, so more resources can be\nthrown at it if available.\n\nCheers\n-----\nHannu Krosing\nGoogle Cloud - We have a long list of planned contributions and we are hiring.\nContact me if interested.\n\n\n\n\nOn Thu, Jul 8, 2021 at 10:48 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Jul 8, 2021 at 5:24 AM Peter Geoghegan <pg@bowt.ie> wrote:\n> >\n> > On Wed, Jul 7, 2021 at 4:47 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > Currently, the TIDs of dead tuples are stored in an array that is\n> > > collectively allocated at the start of lazy vacuum and TID lookup uses\n> > > bsearch(). 
There are the following challenges and limitations:\n> > >\n> > > 1. Don't allocate more than 1GB. There was a discussion to eliminate\n> > > this limitation by using MemoryContextAllocHuge() but there were\n> > > concerns about point 2[1].\n> >\n> > I think that the main problem with the 1GB limitation is that it is\n> > surprising -- it can cause disruption when we first exceed the magical\n> > limit of ~174 million TIDs. This can cause us to dirty index pages a\n> > second time when we might have been able to just do it once with\n> > sufficient memory for TIDs. OTOH there are actually cases where having\n> > less memory for TIDs makes performance *better* because of locality\n> > effects. This perverse behavior with memory sizing isn't a rare case\n> > that we can safely ignore -- unfortunately it's fairly common.\n> >\n> > My point is that we should be careful to choose the correct goal.\n> > Obviously memory use matters. But it might be more helpful to think of\n> > memory use as just a proxy for what truly matters, not a goal in\n> > itself. It's hard to know what this means (what is the \"real goal\"?),\n> > and hard to measure it even if you know for sure. It could still be\n> > useful to think of it like this.\n>\n> As I wrote in the first email, I think there are two important factors\n> in index vacuuming performance: the performance to check if heap TID\n> that an index tuple points to is dead, and the number of times to\n> perform index bulk-deletion. The flame graph I attached in the first\n> mail shows CPU spent much time on lazy_tid_reaped() but vacuum is a\n> disk-intensive operation in practice. Given that most index AM's\n> bulk-deletion does a full index scan and a table could have multiple\n> indexes, reducing the number of times to perform index bulk-deletion\n> really contributes to reducing the execution time, especially for\n> large tables. 
I think that a more compact data structure for dead\n> tuple TIDs is one of the ways to achieve that.\n>\n> >\n> > > A run container is selected in this test case, using 4 bytes for each block.\n> > >\n> > > Execution Time Memory Usage\n> > > array 8,883.03 600,008,248\n> > > intset 7,358.23 100,671,488\n> > > tbm 758.81 100,671,544\n> > > rtbm 764.33 29,384,816\n> > >\n> > > Overall, 'rtbm' has a much better lookup performance and good memory\n> > > usage especially if there are relatively many dead tuples. However, in\n> > > some cases, 'intset' and 'array' have a better memory usage.\n> >\n> > This seems very promising.\n> >\n> > I wonder how much you have thought about the index AM side. It makes\n> > sense to initially evaluate these techniques using this approach of\n> > separating the data structure from how it is used by VACUUM -- I think\n> > that that was a good idea. But at the same time there may be certain\n> > important theoretical questions that cannot be answered this way --\n> > questions about how everything \"fits together\" in a real VACUUM might\n> > matter a lot. You've probably thought about this at least a little\n> > already. Curious to hear how you think it \"fits together\" with the\n> > work that you've done already.\n>\n> Yeah, that definitely needs to be considered. Currently, what we need\n> for the dead tuple storage for lazy vacuum are store, lookup, and\n> iteration. And given the parallel vacuum, it has to be able to be\n> allocated on DSM or DSA. While implementing the PoC code, I'm trying\n> to integrate it with the current lazy vacuum code. As far as I've seen\n> so far, the integration is not hard, at least with the *current* lazy\n> vacuum code and index AMs code.\n>\n> >\n> > The loop inside btvacuumpage() makes each loop iteration call the\n> > callback -- this is always a call to lazy_tid_reaped() in practice.\n> > And that's where we do binary searches. 
These binary searches are\n> > usually where we see a huge number of cycles spent when we look at\n> > profiles, including the profile that produced your flame graph. But I\n> > worry that that might be a bit misleading -- the way that profilers\n> > attribute costs is very complicated and can never be fully trusted.\n> > While it is true that lazy_tid_reaped() often accesses main memory,\n> > which will of course add a huge amount of latency and make it a huge\n> > bottleneck, the \"big picture\" is still relevant.\n> >\n> > I think that the compiler currently has to make very conservative\n> > assumptions when generating the machine code used by the loop inside\n> > btvacuumpage(), which calls through an opaque function pointer at\n> > least once per loop iteration -- anything can alias, so the compiler\n> > must be conservative. The data dependencies are hard for both the\n> > compiler and the CPU to analyze. The cost of using a function pointer\n> > compared to a direct function call is usually quite low, but there are\n> > important exceptions -- cases where it prevents other useful\n> > optimizations. Maybe this is an exception.\n> >\n> > I wonder how much it would help to break up that loop into two loops.\n> > Make the callback into a batch operation that generates state that\n> > describes what to do with each and every index tuple on the leaf page.\n> > The first loop would build a list of TIDs, then you'd call into\n> > vacuumlazy.c and get it to process the TIDs, and finally the second\n> > loop would physically delete the TIDs that need to be deleted. This\n> > would mean that there would be only one call per leaf page per\n> > btbulkdelete(). This would reduce the number of calls to the callback\n> > by at least 100x, and maybe more than 1000x.\n> >\n> > This approach would make btbulkdelete() similar to\n> > _bt_simpledel_pass() + _bt_delitems_delete_check(). 
This is not really\n> > an independent idea to your ideas -- I imagine that this would work\n> > far better when combined with a more compact data structure, which is\n> > naturally more capable of batch processing than a simple array of\n> > TIDs. Maybe this will help the compiler and the CPU to fully\n> > understand the *natural* data dependencies, so that they can be as\n> > effective as possible in making the code run fast. It's possible that\n> > a modern CPU will be able to *hide* the latency more intelligently\n> > than what we have today. The latency is such a big problem that we may\n> > be able to justify \"wasting\" other CPU resources, just because it\n> > sometimes helps with hiding the latency. For example, it might\n> > actually be okay to sort all of the TIDs on the page to make the bulk\n> > processing work -- though you might still do a precheck that is\n> > similar to the precheck inside lazy_tid_reaped() that was added by you\n> > in commit bbaf315309e.\n>\n> Interesting idea. I remember you mentioned this idea somewhere and\n> I've considered this idea too while implementing the PoC code. It's\n> definitely worth trying. Maybe we can write a patch for this as a\n> separate patch? It will change index AM and could improve also the\n> current bulk-deletion. We can consider a better data structure on top\n> of this idea.\n>\n> Regards,\n>\n> --\n> Masahiko Sawada\n> EDB: https://www.enterprisedb.com/\n>\n>\n\n\n", "msg_date": "Thu, 8 Jul 2021 15:40:39 +0200", "msg_from": "Hannu Krosing <hannuk@google.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Resending as forgot to send to the list (thanks Peter :) )\n\nOn Wed, Jul 7, 2021 at 10:24 PM Peter Geoghegan <pg@bowt.ie> wrote:\n>\n> The loop inside btvacuumpage() makes each loop iteration call the\n> callback -- this is always a call to lazy_tid_reaped() in practice.\n> And that's where we do binary searches. 
These binary searches are\n> usually where we see a huge number of cycles spent when we look at\n> profiles, including the profile that produced your flame graph. But I\n> worry that that might be a bit misleading -- the way that profilers\n> attribute costs is very complicated and can never be fully trusted.\n> While it is true that lazy_tid_reaped() often accesses main memory,\n> which will of course add a huge amount of latency and make it a huge\n> bottleneck, the \"big picture\" is still relevant.\n\nThis is why I have mainly focused on making it possible to use SIMD and\nrun 4-8 binary searches in parallel, mostly 8, for AVX2.\n\nHow I am approaching this is separating \"page search\" tyo run over a\n(naturally) sorted array of 32 bit page pointers and only when the\npage is found the indexes in this array are used to look up the\nin-page bitmaps.\nThis allows the heavier bsearch activity to run on smaller range of\nmemory, hopefully reducing the cache trashing.\n\nThere are opportunities to optimise this further for cash hits, buy\ncollecting the tids from indexes in larger patches and then\nconstraining the searches in the main is-deleted-bitmap to run over\nsections of it, but at some point this becomes a very complex\nbalancing act, as the manipulation of the bits-to-check from indexes\nalso takes time, not to mention the need to release the index pages\nand then later chase the tid pointers in case they have moved while\nchecking them.\n\nI have not measured anything yet, but one of my concerns in case of\nvery large dead tuple collections searched by 8-way parallel bsearch\ncould actually get close to saturating RAM bandwidth by reading (8 x\n32bits x cache-line-size) bytes from main memory every few cycles, so\nwe may need some inner-loop level throttling similar to current\nvacuum_cost_limit for data pages.\n\n> I think that the compiler currently has to make very conservative\n> assumptions when generating the machine code used by the loop inside\n> 
btvacuumpage(), which calls through an opaque function pointer at\n> least once per loop iteration -- anything can alias, so the compiler\n> must be conservative.\n\nDefinitely this! The lookup function needs to be turned into an inline\nfunction or #define as well to give the compiler maximum freedoms.\n\n> The data dependencies are hard for both the\n> compiler and the CPU to analyze. The cost of using a function pointer\n> compared to a direct function call is usually quite low, but there are\n> important exceptions -- cases where it prevents other useful\n> optimizations. Maybe this is an exception.\n\nYes. Also this could be a place where unrolling the loop could make a\nreal difference.\n\nMaybe not unrolling the full 32 loops for 32 bit bserach, but\nsomething like 8-loop unroll for getting most of the benefit.\n\nThe 32x unroll would not be really that bad for performance if all 32\nloops were needed, but mostly we would need to jump into last 10 to 20\nloops for lookup min 1000 to 1000000 pages and I suspect this is such\na weird corner case that compiler is really unlikely to have this\noptimisation supported. Of course I may be wrong and ith is a common\nenough case for the optimiser.\n\n>\n> I wonder how much it would help to break up that loop into two loops.\n> Make the callback into a batch operation that generates state that\n> describes what to do with each and every index tuple on the leaf page.\n> The first loop would build a list of TIDs, then you'd call into\n> vacuumlazy.c and get it to process the TIDs, and finally the second\n> loop would physically delete the TIDs that need to be deleted. This\n> would mean that there would be only one call per leaf page per\n> btbulkdelete(). 
This would reduce the number of calls to the callback\n> by at least 100x, and maybe more than 1000x.\n\nWhile it may make sense to have different bitmap encodings for\ndifferent distributions, it likely would not be good for optimisations\nif all these are used at the same time.\n\nThis is why I propose the first bitmap collecting phase to collect\ninto a file and then - when reading into memory for lookups phase -\npossibly rewrite the initial structure to something else if it sees\nthat it is more efficient. Like for example where the first half of\nthe file consists of only empty pages.\n\n> This approach would make btbulkdelete() similar to\n> _bt_simpledel_pass() + _bt_delitems_delete_check(). This is not really\n> an independent idea to your ideas -- I imagine that this would work\n> far better when combined with a more compact data structure, which is\n> naturally more capable of batch processing than a simple array of\n> TIDs. Maybe this will help the compiler and the CPU to fully\n> understand the *natural* data dependencies, so that they can be as\n> effective as possible in making the code run fast. It's possible that\n> a modern CPU will be able to *hide* the latency more intelligently\n> than what we have today. The latency is such a big problem that we may\n> be able to justify \"wasting\" other CPU resources, just because it\n> sometimes helps with hiding the latency. 
For example, it might\n> actually be okay to sort all of the TIDs on the page to make the bulk\n> processing work\n\nThen again it may be so much extra work that it starts to dominate\nsome parts of profiles.\n\nFor example see the work that was done in improving the mini-vacuum\npart where it was actually faster to copy data out to a separate\nbuffer and then back in than shuffle it around inside the same 8k page\n:)\n\nSo only testing will tell.\n\n> -- though you might still do a precheck that is\n> similar to the precheck inside lazy_tid_reaped() that was added by you\n> in commit bbaf315309e.\n>\n> Of course it's very easy to be wrong about stuff like this. But it\n> might not be that hard to prototype. You can literally copy and paste\n> code from _bt_delitems_delete_check() to do this. It does the same\n> basic thing already.\n\nAlso a lot of testing would be needed to figure out which strategy\nfits best for which distribution of dead tuples, and possibly their\nrelation to the order of tuples to check from indexes .\n\n\nCheers\n\n--\nHannu Krosing\nGoogle Cloud - We have a long list of planned contributions and we are hiring.\nContact me if interested.\n\n\n", "msg_date": "Thu, 8 Jul 2021 22:53:20 +0200", "msg_from": "Hannu Krosing <hannuk@google.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 8, 2021 at 1:47 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> As I wrote in the first email, I think there are two important factors\n> in index vacuuming performance: the performance to check if heap TID\n> that an index tuple points to is dead, and the number of times to\n> perform index bulk-deletion. The flame graph I attached in the first\n> mail shows CPU spent much time on lazy_tid_reaped() but vacuum is a\n> disk-intensive operation in practice.\n\nMaybe. But I recently bought an NVME SSD that can read at over\n6GB/second. 
So \"disk-intensive\" is not what it used to be -- at least\nnot for reads. In general it's not good if we do multiple scans of an\nindex -- no question. But there is a danger in paying a little too\nmuch attention to what is true in general -- we should not ignore what\nmight be true in specific cases either. Maybe we can solve some\nproblems by spilling the TID data structure to disk -- if we trade\nsequential I/O for random I/O, we may be able to do only one pass over\nthe index (especially when we have *almost* enough memory to fit all\nTIDs, but not quite enough).\n\nThe big problem with multiple passes over the index is not the extra\nread bandwidth -- it's the extra page dirtying (writes), especially\nwith things like indexes on UUID columns. We want to dirty each leaf\npage in each index at most once per VACUUM, and should be willing to\npay some cost in order to get a larger benefit with page dirtying.\nAfter all, writes are much more expensive on modern flash devices --\nif we have to do more random read I/O to spill the TIDs then that\nmight actually be 100% worth it. And, we don't need much memory for\nsomething that works well as a negative cache, either -- so maybe the\nextra random read I/O needed to spill the TIDs will be very limited\nanyway.\n\nThere are many possibilities. You can probably think of other\ntrade-offs yourself. We could maybe use a cost model for all this --\nit is a little like a hash join IMV. This is just something to think\nabout while refining the design.\n\n> Interesting idea. I remember you mentioned this idea somewhere and\n> I've considered this idea too while implementing the PoC code. It's\n> definitely worth trying. Maybe we can write a patch for this as a\n> separate patch? It will change index AM and could improve also the\n> current bulk-deletion. We can consider a better data structure on top\n> of this idea.\n\nI'm happy to write it as a separate patch, either by leaving it to you\nor by collaborating directly. 
It's not necessary to tie it to the\nfirst patch. But at the same time it is highly related to what you're\nalready doing.\n\nAs I said I am totally prepared to be wrong here. But it seems worth\nit to try. In Postgres 14, the _bt_delitems_vacuum() function (which\nactually carries out VACUUM's physical page modifications to a leaf\npage) is almost identical to _bt_delitems_delete(). And\n_bt_delitems_delete() was already built with these kinds of problems\nin mind -- it batches work to get the most out of synchronizing with\ndistant state describing which tuples to delete. It's not exactly the\nsame situation, but it's *kinda* similar. More importantly, it's a\nrelatively cheap and easy experiment to run, since we already have\nmost of what we need (we can take it from\n_bt_delitems_delete_check()).\n\nUsually this kind of micro optimization is not very valuable -- 99.9%+\nof all code just isn't that sensitive to having the right\noptimizations. But this is one of the rare important cases where we\nreally should look at the raw machine code, and do some kind of\nmicroarchitectural level analysis through careful profiling, using\ntools like perf. The laws of physics (or electronic engineering) make\nit inevitable that searching for TIDs to match is going to be kind of\nslow. But we should at least make sure that we use every trick\navailable to us to reduce the bottleneck, since it really does matter\na lot to users. Users should be able to expect that this code will at\nleast be as fast as the hardware that they paid for can allow (or\nclose to it). 
There is a great deal of microarchitectural\nsophistication with modern CPUs, much of which is designed to make\nproblems like this one less bad [1].\n\n[1] https://www.agner.org/optimize/microarchitecture.pdf\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Thu, 8 Jul 2021 14:20:12 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 8, 2021 at 1:53 PM Hannu Krosing <hannuk@google.com> wrote:\n> How I am approaching this is separating \"page search\" tyo run over a\n> (naturally) sorted array of 32 bit page pointers and only when the\n> page is found the indexes in this array are used to look up the\n> in-page bitmaps.\n> This allows the heavier bsearch activity to run on smaller range of\n> memory, hopefully reducing the cache trashing.\n\nI think that the really important thing is to figure out roughly the\nright data structure first.\n\n> There are opportunities to optimise this further for cash hits, buy\n> collecting the tids from indexes in larger patches and then\n> constraining the searches in the main is-deleted-bitmap to run over\n> sections of it, but at some point this becomes a very complex\n> balancing act, as the manipulation of the bits-to-check from indexes\n> also takes time, not to mention the need to release the index pages\n> and then later chase the tid pointers in case they have moved while\n> checking them.\n\nI would say that 200 TIDs per leaf page is common and ~1350 TIDs per\nleaf page is not uncommon (with deduplication). 
Seems like that might\nbe enough?\n\n> I have not measured anything yet, but one of my concerns in case of\n> very large dead tuple collections searched by 8-way parallel bsearch\n> could actually get close to saturating RAM bandwidth by reading (8 x\n> 32bits x cache-line-size) bytes from main memory every few cycles, so\n> we may need some inner-loop level throttling similar to current\n> vacuum_cost_limit for data pages.\n\nIf it happens then it'll be a nice problem to have, I suppose.\n\n> Maybe not unrolling the full 32 loops for 32 bit bserach, but\n> something like 8-loop unroll for getting most of the benefit.\n\nMy current assumption is that we're bound by memory speed right now,\nand that that is the big bottleneck to eliminate -- we must keep the\nCPU busy with data to process first. That seems like the most\npromising thing to focus on right now.\n\n> While it may make sense to have different bitmap encodings for\n> different distributions, it likely would not be good for optimisations\n> if all these are used at the same time.\n\nTo some degree designs like Roaring bitmaps are just that -- a way of\ndynamically figuring out which strategy to use based on data\ncharacteristics.\n\n> This is why I propose the first bitmap collecting phase to collect\n> into a file and then - when reading into memory for lookups phase -\n> possibly rewrite the initial structure to something else if it sees\n> that it is more efficient. Like for example where the first half of\n> the file consists of only empty pages.\n\nYeah, I agree that something like that could make sense. Although\nrewriting it doesn't seem particularly promising, since we can easily\nmake it cheap to process any TID that falls into a range of blocks\nthat have no dead tuples. We don't need to rewrite the data structure\nto make it do that well, AFAICT.\n\nWhen I said that I thought of this a little like a hash join, I was\nbeing more serious than you might imagine. 
Note that the number of\nindex tuples that VACUUM will delete from each index can now be far\nless than the total number of TIDs stored in memory. So even when we\nhave (say) 20% of all of the TIDs from the table in our in memory list\nmanaged by vacuumlazy.c, it's now quite possible that VACUUM will only\nactually \"match\"/\"join\" (i.e. delete) as few as 2% of the index tuples\nit finds in the index (there really is no way to predict how many).\nThe opportunistic deletion stuff could easily be doing most of the\nrequired cleanup in an eager fashion following recent improvements --\nVACUUM need only take care of \"floating garbage\" these days. In other\nwords, thinking about this as something that is a little bit like a\nhash join makes sense because hash joins do very well with high join\nselectivity, and high join selectivity is common in the real world.\nThe intersection of TIDs from each leaf page with the in-memory TID\ndelete structure will often be very small indeed.\n\n> Then again it may be so much extra work that it starts to dominate\n> some parts of profiles.\n>\n> For example see the work that was done in improving the mini-vacuum\n> part where it was actually faster to copy data out to a separate\n> buffer and then back in than shuffle it around inside the same 8k page\n\nSome of what I'm saying is based on the experience of improving\nsimilar code used by index tuple deletion in Postgres 14. That did\nquite a lot of sorting of TIDs and things like that. In the end the\nsorting had no more than a negligible impact on performance. What\nreally mattered was that we efficiently coordinate with distant heap\npages that describe which index tuples we can delete from a given leaf\npage. Sorting hundreds of TIDs is cheap. Reading hundreds of random\nlocations in memory (or even far fewer) is not so cheap. It might even\nbe very slow indeed. 
Sorting in order to batch could end up looking\nlike cheap insurance that we should be glad to pay for.\n\n> So only testing will tell.\n\nTrue.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Thu, 8 Jul 2021 15:34:26 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jul 9, 2021 at 12:34 AM Peter Geoghegan <pg@bowt.ie> wrote:\n>\n...\n>\n> I would say that 200 TIDs per leaf page is common and ~1350 TIDs per\n> leaf page is not uncommon (with deduplication). Seems like that might\n> be enough?\n\nLikely yes, and also it would have the nice property of not changing\nthe index page locking behaviour.\n\nAre deduplicated tids in the leaf page already sorted in heap order ?\nThis could potentially simplify / speed up the sort.\n\n> > I have not measured anything yet, but one of my concerns in case of\n> > very large dead tuple collections searched by 8-way parallel bsearch\n> > could actually get close to saturating RAM bandwidth by reading (8 x\n> > 32bits x cache-line-size) bytes from main memory every few cycles, so\n> > we may need some inner-loop level throttling similar to current\n> > vacuum_cost_limit for data pages.\n>\n> If it happens then it'll be a nice problem to have, I suppose.\n>\n> > Maybe not unrolling the full 32 loops for 32 bit bserach, but\n> > something like 8-loop unroll for getting most of the benefit.\n>\n> My current assumption is that we're bound by memory speed right now,\n\nMost likely yes, and this should be also easy to check with manually\nunrolling perhaps 4 loops and measuring any speed increase.\n\n> and that that is the big bottleneck to eliminate -- we must keep the\n> CPU busy with data to process first. 
That seems like the most\n> promising thing to focus on right now.\n\nThis has actually two parts\n - trying to make sure that we can make as much as possible from cache\n - if we need to get out of cache then try to parallelise this as\nmuch as possible\n\nat the same time we need to watch that we are not making the index\ntuple preparation work so heavy that it starts to dominate over memory\naccess\n\n> > While it may make sense to have different bitmap encodings for\n> > different distributions, it likely would not be good for optimisations\n> > if all these are used at the same time.\n>\n> To some degree designs like Roaring bitmaps are just that -- a way of\n> dynamically figuring out which strategy to use based on data\n> characteristics.\n\nit is, but as I am keeping one eye open for vectorisation, I don't\nlike when different parts of the same bitmap have radically different\nencoding strategies.\n\n> > This is why I propose the first bitmap collecting phase to collect\n> > into a file and then - when reading into memory for lookups phase -\n> > possibly rewrite the initial structure to something else if it sees\n> > that it is more efficient. Like for example where the first half of\n> > the file consists of only empty pages.\n>\n> Yeah, I agree that something like that could make sense. 
Although\n> rewriting it doesn't seem particularly promising,\n\nyeah, I hope to prove (or verify :) ) the structure is good enough so\nthat it does not need the rewrite.\n\n> since we can easily\n> make it cheap to process any TID that falls into a range of blocks\n> that have no dead tuples.\n\nI actually meant the opposite case, where we could replace a full 80\nbytes 291-bit \"all dead\" bitmap with just a range - int4 for page and\ntwo int2-s for min and max tid-in page for extra 10x reduction, on top\nof original 21x reduction from current 6 bytes / bit encoding to my\npage_bsearch_vector bitmaps which encodes one page to maximum of 80\nbytes (5 x int4 sub-page pointers + 5 x int4 bitmaps).\n\nI also started out by investigating RoaringBitmaps, but when I\nrealized that we will likely have to rewrite it anyway I continued\nworking on getting to a single uniform encoding which fits most use\ncases Good Enough and then use that uniformity to enable the compiler\nto do its optimisation and hopefully also vectoriziation magic.\n\n> We don't need to rewrite the data structure\n> to make it do that well, AFAICT.\n>\n> When I said that I thought of this a little like a hash join, I was\n> being more serious than you might imagine. Note that the number of\n> index tuples that VACUUM will delete from each index can now be far\n> less than the total number of TIDs stored in memory. So even when we\n> have (say) 20% of all of the TIDs from the table in our in memory list\n> managed by vacuumlazy.c, it's now quite possible that VACUUM will only\n> actually \"match\"/\"join\" (i.e. 
delete) as few as 2% of the index tuples\n> it finds in the index (there really is no way to predict how many).\n> The opportunistic deletion stuff could easily be doing most of the\n> required cleanup in an eager fashion following recent improvements --\n> VACUUM need only take care of \"floating garbage\" these days.\n\nOk, this points to the need to mainly optimise for quite sparse\npopulation of dead tuples, which is still mainly clustered page-wise ?\n\n> In other\n> words, thinking about this as something that is a little bit like a\n> hash join makes sense because hash joins do very well with high join\n> selectivity, and high join selectivity is common in the real world.\n> The intersection of TIDs from each leaf page with the in-memory TID\n> delete structure will often be very small indeed.\n\nThe hard to optimize case is still when we have dead tuple counts in\nhundreds of millions, or even billions, like on a HTAP database after\na few hours of OLAP query have accumulated loads of dead tuples in\ntables getting heavy OLTP traffic.\n\nThere of course we could do a totally different optimisation, where we\nalso allow reaping tuples newer than the OLAP queries snapshot if we\ncan prove that when the snapshot moves forward next time, it has to\njump over said transactions making them indeed DEAD and not RECENTLY\nDEAD. Currently we let a single OLAP query ruin everything :)\n\n> > Then again it may be so much extra work that it starts to dominate\n> > some parts of profiles.\n> >\n> > For example see the work that was done in improving the mini-vacuum\n> > part where it was actually faster to copy data out to a separate\n> > buffer and then back in than shuffle it around inside the same 8k page\n>\n> Some of what I'm saying is based on the experience of improving\n> similar code used by index tuple deletion in Postgres 14. That did\n> quite a lot of sorting of TIDs and things like that. 
In the end the\n> sorting had no more than a negligible impact on performance.\n\nGood to know :)\n\n> What\n> really mattered was that we efficiently coordinate with distant heap\n> pages that describe which index tuples we can delete from a given leaf\n> page. Sorting hundreds of TIDs is cheap. Reading hundreds of random\n> locations in memory (or even far fewer) is not so cheap. It might even\n> be very slow indeed. Sorting in order to batch could end up looking\n> like cheap insurance that we should be glad to pay for.\n\nIf the most expensive operation is sorting a few hundred of tids, then\nthis should be fast enough.\n\nMy worries were more that after the sorting we can not to dsimple\nindex lookups for them, but each needs to be found via bseach or maybe\neven just search if that is faster under some size limit, and that\nthese could add up. Or some other needed thing that also has to be\ndone, like allocating extra memory or moving other data around in a\nway that CPU does not like.\n\nCheers\n-----\nHannu Krosing\nGoogle Cloud - We have a long list of planned contributions and we are hiring.\nContact me if interested.\n\n\n", "msg_date": "Fri, 9 Jul 2021 01:21:25 +0200", "msg_from": "Hannu Krosing <hannuk@google.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\n\nOn 2021-07-07 20:46:38 +0900, Masahiko Sawada wrote:\n> 1. Don't allocate more than 1GB. There was a discussion to eliminate\n> this limitation by using MemoryContextAllocHuge() but there were\n> concerns about point 2[1].\n>\n> 2. Allocate the whole memory space at once.\n>\n> 3. Slow lookup performance (O(logN)).\n>\n> I’ve done some experiments in this area and would like to share the\n> results and discuss ideas.\n\nYea, this is a serious issue.\n\n\n3) could possibly be addressed to a decent degree without changing the\nfundamental datastructure too much. 
There's some sizable and trivial\nwins by just changing vac_cmp_itemptr() to compare int64s and by using\nan open coded bsearch().\n\nThe big problem with bsearch isn't imo the O(log(n)) complexity - it's\nthat it has an abominally bad cache locality. And that can be addressed\nhttps://arxiv.org/ftp/arxiv/papers/1509/1509.05053.pdf\n\nImo 2) isn't really that a hard problem to improve, even if we were to\nstay with the current bsearch approach. Reallocation with an aggressive\ngrowth factor or such isn't that bad.\n\n\nThat's not to say we ought to stay with binary search...\n\n\n\n> Problems Solutions\n> ===============\n>\n> Firstly, I've considered using existing data structures:\n> IntegerSet(src/backend/lib/integerset.c) and\n> TIDBitmap(src/backend/nodes/tidbitmap.c). Those address point 1 but\n> only either point 2 or 3. IntegerSet uses lower memory thanks to\n> simple-8b encoding but is slow at lookup, still O(logN), since it’s a\n> tree structure. On the other hand, TIDBitmap has a good lookup\n> performance, O(1), but could unnecessarily use larger memory in some\n> cases since it always allocates the space for bitmap enough to store\n> all possible offsets. With 8kB blocks, the maximum number of line\n> pointers in a heap page is 291 (c.f., MaxHeapTuplesPerPage) so the\n> bitmap is 40 bytes long and we always need 46 bytes in total per block\n> including other meta information.\n\nImo tidbitmap isn't particularly good, even in the current use cases -\nit's constraining in what we can store (a problem for other AMs), not\nactually that dense, the lossy mode doesn't choose what information to\nloose well etc.\n\nIt'd be nice if we came up with a datastructure that could also replace\nthe bitmap scan cases.\n\n\n> The data structure is somewhat similar to TIDBitmap. 
It consists of\n> the hash table and the container area; the hash table has entries per\n> block and each block entry allocates its memory space, called a\n> container, in the container area to store its offset numbers. The\n> container area is actually an array of bytes and can be enlarged as\n> needed. In the container area, the data representation of offset\n> numbers varies depending on their cardinality. It has three container\n> types: array, bitmap, and run.\n\nNot a huge fan of encoding this much knowledge about the tid layout...\n\n\n> For example, if there are two dead tuples at offset 1 and 150, it uses\n> the array container that has an array of two 2-byte integers\n> representing 1 and 150, using 4 bytes in total. If we used the bitmap\n> container in this case, we would need 20 bytes instead. On the other\n> hand, if there are consecutive 20 dead tuples from offset 1 to 20, it\n> uses the run container that has an array of 2-byte integers. The first\n> value in each pair represents a starting offset number, whereas the\n> second value represents its length. Therefore, in this case, the run\n> container uses only 4 bytes in total. Finally, if there are dead\n> tuples at every other offset from 1 to 100, it uses the bitmap\n> container that has an uncompressed bitmap, using 13 bytes. We need\n> another 16 bytes per block entry for hash table entry.\n>\n> The lookup complexity of a bitmap container is O(1) whereas the one of\n> an array and a run container is O(N) or O(logN) but the number of\n> elements in those two containers should not be large it would not be a\n> problem.\n\nHm. Why is O(N) not an issue? Consider e.g. the case of a table in which\nmany tuples have been deleted. In cases where the \"run\" storage is\ncheaper (e.g. because there's high offset numbers due to HOT pruning),\nwe could end up regularly scanning a few hundred entries for a\nmatch. 
That's not cheap anymore.\n\n\n> Evaluation\n> ========\n>\n> Before implementing this idea and integrating it with lazy vacuum\n> code, I've implemented a benchmark tool dedicated to evaluating\n> lazy_tid_reaped() performance[4].\n\nGood idea!\n\n\n> In all test cases, I simulated that the table has 1,000,000 blocks and\n> every block has at least one dead tuple.\n\nThat doesn't strike me as a particularly common scenario? I think it's\nquite rare for there to be so evenly but sparse dead tuples. In\nparticularly it's very common for there to be long runs of dead tuples\nseparated by long ranges of no dead tuples at all...\n\n\n> The benchmark scenario is that for\n> each virtual heap tuple we check if there is its TID in the dead\n> tuple storage. Here are the results of execution time in milliseconds\n> and memory usage in bytes:\n\nIn which order are the dead tuples checked? Looks like in sequential\norder? In the case of an index over a column that's not correlated with\nthe heap order the lookups are often much more random - which can\ninfluence lookup performance drastically, due to cache differences in\ncache locality. Which will make some structures look worse/better than\nothers.\n\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Thu, 8 Jul 2021 20:53:32 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2021-07-08 20:53:32 -0700, Andres Freund wrote:\n> On 2021-07-07 20:46:38 +0900, Masahiko Sawada wrote:\n> > 1. Don't allocate more than 1GB. There was a discussion to eliminate\n> > this limitation by using MemoryContextAllocHuge() but there were\n> > concerns about point 2[1].\n> >\n> > 2. Allocate the whole memory space at once.\n> >\n> > 3. 
Slow lookup performance (O(logN)).\n> >\n> > I’ve done some experiments in this area and would like to share the\n> > results and discuss ideas.\n>\n> Yea, this is a serious issue.\n>\n>\n> 3) could possibly be addressed to a decent degree without changing the\n> fundamental datastructure too much. There's some sizable and trivial\n> wins by just changing vac_cmp_itemptr() to compare int64s and by using\n> an open coded bsearch().\n\nJust using itemptr_encode() makes array in test #1 go from 8s to 6.5s on my\nmachine.\n\nAnother thing I just noticed is that you didn't include the build times for the\ndatastructures. They are lower than the lookups currently, but it does seem\nlike a relevant thing to measure as well. E.g. for #1 I see the following build\ntimes\n\narray 24.943 ms\ntbm 206.456 ms\nintset 93.575 ms\nvtbm 134.315 ms\nrtbm 145.964 ms\n\nthat's a significant range...\n\n\nRandomizing the lookup order (using a random shuffle in\ngenerate_index_tuples()) changes the benchmark results for #1 significantly:\n\n shuffled time unshuffled time\narray 6551.726 ms 6478.554 ms\nintset 67590.879 ms 10815.810 ms\nrtbm 17992.487 ms 2518.492 ms\ntbm 364.917 ms 360.128 ms\nvtbm 12227.884 ms 1288.123 ms\n\n\n\nFWIW, I get an assertion failure when using an assertion build:\n\n#2 0x0000561800ea02e0 in ExceptionalCondition (conditionName=0x7f9115a88e91 \"found\", errorType=0x7f9115a88d11 \"FailedAssertion\", \n fileName=0x7f9115a88e8a \"rtbm.c\", lineNumber=242) at /home/andres/src/postgresql/src/backend/utils/error/assert.c:69\n#3 0x00007f9115a87645 in rtbm_add_tuples (rtbm=0x561806293280, blkno=0, offnums=0x7fffdccabb00, nitems=10) at rtbm.c:242\n#4 0x00007f9115a8363d in load_rtbm (rtbm=0x561806293280, itemptrs=0x7f908a203050, nitems=10000000) at bdbench.c:618\n#5 0x00007f9115a834b9 in rtbm_attach (lvtt=0x7f9115a8c300 <LVTestSubjects+352>, nitems=10000000, minblk=2139062143, maxblk=2139062143, maxoff=32639)\n at bdbench.c:587\n#6 0x00007f9115a83837 in attach 
(lvtt=0x7f9115a8c300 <LVTestSubjects+352>, nitems=10000000, minblk=2139062143, maxblk=2139062143, maxoff=32639)\n at bdbench.c:658\n#7 0x00007f9115a84190 in attach_dead_tuples (fcinfo=0x56180322d690) at bdbench.c:873\n\nI assume you just inverted the Assert(found) assertion?\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Thu, 8 Jul 2021 22:37:34 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jul 9, 2021 at 12:53 PM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n>\n> On 2021-07-07 20:46:38 +0900, Masahiko Sawada wrote:\n> > 1. Don't allocate more than 1GB. There was a discussion to eliminate\n> > this limitation by using MemoryContextAllocHuge() but there were\n> > concerns about point 2[1].\n> >\n> > 2. Allocate the whole memory space at once.\n> >\n> > 3. Slow lookup performance (O(logN)).\n> >\n> > I’ve done some experiments in this area and would like to share the\n> > results and discuss ideas.\n>\n> Yea, this is a serious issue.\n>\n>\n> 3) could possibly be addressed to a decent degree without changing the\n> fundamental datastructure too much. There's some sizable and trivial\n> wins by just changing vac_cmp_itemptr() to compare int64s and by using\n> an open coded bsearch().\n>\n> The big problem with bsearch isn't imo the O(log(n)) complexity - it's\n> that it has an abominally bad cache locality. And that can be addressed\n> https://arxiv.org/ftp/arxiv/papers/1509/1509.05053.pdf\n>\n> Imo 2) isn't really that a hard problem to improve, even if we were to\n> stay with the current bsearch approach. 
Reallocation with an aggressive\n> growth factor or such isn't that bad.\n>\n>\n> That's not to say we ought to stay with binary search...\n>\n>\n>\n> > Problems Solutions\n> > ===============\n> >\n> > Firstly, I've considered using existing data structures:\n> > IntegerSet(src/backend/lib/integerset.c) and\n> > TIDBitmap(src/backend/nodes/tidbitmap.c). Those address point 1 but\n> > only either point 2 or 3. IntegerSet uses lower memory thanks to\n> > simple-8b encoding but is slow at lookup, still O(logN), since it’s a\n> > tree structure. On the other hand, TIDBitmap has a good lookup\n> > performance, O(1), but could unnecessarily use larger memory in some\n> > cases since it always allocates the space for bitmap enough to store\n> > all possible offsets. With 8kB blocks, the maximum number of line\n> > pointers in a heap page is 291 (c.f., MaxHeapTuplesPerPage) so the\n> > bitmap is 40 bytes long and we always need 46 bytes in total per block\n> > including other meta information.\n>\n> Imo tidbitmap isn't particularly good, even in the current use cases -\n> it's constraining in what we can store (a problem for other AMs), not\n> actually that dense, the lossy mode doesn't choose what information to\n> loose well etc.\n>\n> It'd be nice if we came up with a datastructure that could also replace\n> the bitmap scan cases.\n\nAgreed.\n\n>\n>\n> > The data structure is somewhat similar to TIDBitmap. It consists of\n> > the hash table and the container area; the hash table has entries per\n> > block and each block entry allocates its memory space, called a\n> > container, in the container area to store its offset numbers. The\n> > container area is actually an array of bytes and can be enlarged as\n> > needed. In the container area, the data representation of offset\n> > numbers varies depending on their cardinality. 
It has three container\n> > types: array, bitmap, and run.\n>\n> Not a huge fan of encoding this much knowledge about the tid layout...\n>\n>\n> > For example, if there are two dead tuples at offset 1 and 150, it uses\n> > the array container that has an array of two 2-byte integers\n> > representing 1 and 150, using 4 bytes in total. If we used the bitmap\n> > container in this case, we would need 20 bytes instead. On the other\n> > hand, if there are consecutive 20 dead tuples from offset 1 to 20, it\n> > uses the run container that has an array of 2-byte integers. The first\n> > value in each pair represents a starting offset number, whereas the\n> > second value represents its length. Therefore, in this case, the run\n> > container uses only 4 bytes in total. Finally, if there are dead\n> > tuples at every other offset from 1 to 100, it uses the bitmap\n> > container that has an uncompressed bitmap, using 13 bytes. We need\n> > another 16 bytes per block entry for hash table entry.\n> >\n> > The lookup complexity of a bitmap container is O(1) whereas the one of\n> > an array and a run container is O(N) or O(logN) but the number of\n> > elements in those two containers should not be large it would not be a\n> > problem.\n>\n> Hm. Why is O(N) not an issue? Consider e.g. the case of a table in which\n> many tuples have been deleted. In cases where the \"run\" storage is\n> cheaper (e.g. because there's high offset numbers due to HOT pruning),\n> we could end up regularly scanning a few hundred entries for a\n> match. That's not cheap anymore.\n\nWith 8kB blocks, the maximum size of a bitmap container is 37 bytes.\nIOW, other two types of containers are always smaller than 37 bytes.\nSince the run container uses 4 bytes per run, the number of runs in a\nrun container never be more than 9. Even with 32kB blocks, we don’t\nhave more than 37 runs. 
So I think N is small enough in this case.\n\n>\n>\n> > Evaluation\n> > ========\n> >\n> > Before implementing this idea and integrating it with lazy vacuum\n> > code, I've implemented a benchmark tool dedicated to evaluating\n> > lazy_tid_reaped() performance[4].\n>\n> Good idea!\n>\n>\n> > In all test cases, I simulated that the table has 1,000,000 blocks and\n> > every block has at least one dead tuple.\n>\n> That doesn't strike me as a particularly common scenario? I think it's\n> quite rare for there to be so evenly but sparse dead tuples. In\n> particularly it's very common for there to be long runs of dead tuples\n> separated by long ranges of no dead tuples at all...\n\nAgreed. I'll test with such scenarios.\n\n>\n>\n> > The benchmark scenario is that for\n> > each virtual heap tuple we check if there is its TID in the dead\n> > tuple storage. Here are the results of execution time in milliseconds\n> > and memory usage in bytes:\n>\n> In which order are the dead tuples checked? Looks like in sequential\n> order? In the case of an index over a column that's not correlated with\n> the heap order the lookups are often much more random - which can\n> influence lookup performance drastically, due to cache differences in\n> cache locality. Which will make some structures look worse/better than\n> others.\n\nGood point. It's sequential order, which is not good. I'll test again\nafter shuffling virtual index tuples.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Fri, 9 Jul 2021 15:35:17 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jul 9, 2021 at 2:37 PM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2021-07-08 20:53:32 -0700, Andres Freund wrote:\n> > On 2021-07-07 20:46:38 +0900, Masahiko Sawada wrote:\n> > > 1. Don't allocate more than 1GB. 
There was a discussion to eliminate\n> > > this limitation by using MemoryContextAllocHuge() but there were\n> > > concerns about point 2[1].\n> > >\n> > > 2. Allocate the whole memory space at once.\n> > >\n> > > 3. Slow lookup performance (O(logN)).\n> > >\n> > > I’ve done some experiments in this area and would like to share the\n> > > results and discuss ideas.\n> >\n> > Yea, this is a serious issue.\n> >\n> >\n> > 3) could possibly be addressed to a decent degree without changing the\n> > fundamental datastructure too much. There's some sizable and trivial\n> > wins by just changing vac_cmp_itemptr() to compare int64s and by using\n> > an open coded bsearch().\n>\n> Just using itemptr_encode() makes array in test #1 go from 8s to 6.5s on my\n> machine.\n>\n> Another thing I just noticed is that you didn't include the build times for the\n> datastructures. They are lower than the lookups currently, but it does seem\n> like a relevant thing to measure as well. E.g. for #1 I see the following build\n> times\n>\n> array 24.943 ms\n> tbm 206.456 ms\n> intset 93.575 ms\n> vtbm 134.315 ms\n> rtbm 145.964 ms\n>\n> that's a significant range...\n\nGood point. I got similar results when measuring on my machine:\n\narray 57.987 ms\ntbm 297.720 ms\nintset 113.796 ms\nvtbm 165.268 ms\nrtbm 199.658 ms\n\n>\n> Randomizing the lookup order (using a random shuffle in\n> generate_index_tuples()) changes the benchmark results for #1 significantly:\n>\n> shuffled time unshuffled time\n> array 6551.726 ms 6478.554 ms\n> intset 67590.879 ms 10815.810 ms\n> rtbm 17992.487 ms 2518.492 ms\n> tbm 364.917 ms 360.128 ms\n> vtbm 12227.884 ms 1288.123 ms\n\nI believe that in your test, tbm_reaped() actually always returned\ntrue. That could explain tbm was very fast in both cases. Since\nTIDBitmap in the core doesn't support the existence check tbm_reaped()\nin bdbench.c always returns true. 
I added a patch in the repository to\nadd existence check support to TIDBitmap, although it assumes bitmap\nnever be lossy.\n\nThat being said, I'm surprised that rtbm is slower than array even in\nthe unshuffled case. I've also measured the shuffle cases and got\ndifferent results. To be clear, I used prepare() SQL function to\nprepare both virtual dead tuples and index tuples, load them by\nattach_dead_tuples() SQL function, and executed bench() SQL function\nfor each data structure. Here are the results:\n\n shuffled time unshuffled time\narray 88899.513 ms 12616.521 ms\nintset 73476.055 ms 10063.405 ms\nrtbm 22264.671 ms 2073.171 ms\ntbm 10285.092 ms 1417.312 ms\nvtbm 14488.581 ms 1240.666 ms\n\n>\n> FWIW, I get an assertion failure when using an assertion build:\n>\n> #2 0x0000561800ea02e0 in ExceptionalCondition (conditionName=0x7f9115a88e91 \"found\", errorType=0x7f9115a88d11 \"FailedAssertion\",\n> fileName=0x7f9115a88e8a \"rtbm.c\", lineNumber=242) at /home/andres/src/postgresql/src/backend/utils/error/assert.c:69\n> #3 0x00007f9115a87645 in rtbm_add_tuples (rtbm=0x561806293280, blkno=0, offnums=0x7fffdccabb00, nitems=10) at rtbm.c:242\n> #4 0x00007f9115a8363d in load_rtbm (rtbm=0x561806293280, itemptrs=0x7f908a203050, nitems=10000000) at bdbench.c:618\n> #5 0x00007f9115a834b9 in rtbm_attach (lvtt=0x7f9115a8c300 <LVTestSubjects+352>, nitems=10000000, minblk=2139062143, maxblk=2139062143, maxoff=32639)\n> at bdbench.c:587\n> #6 0x00007f9115a83837 in attach (lvtt=0x7f9115a8c300 <LVTestSubjects+352>, nitems=10000000, minblk=2139062143, maxblk=2139062143, maxoff=32639)\n> at bdbench.c:658\n> #7 0x00007f9115a84190 in attach_dead_tuples (fcinfo=0x56180322d690) at bdbench.c:873\n>\n> I assume you just inverted the Assert(found) assertion?\n\nRight. 
Fixed it.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Fri, 9 Jul 2021 16:16:55 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 8, 2021 at 7:51 AM Peter Geoghegan <pg@bowt.ie> wrote:\n>\n> On Wed, Jul 7, 2021 at 1:24 PM Peter Geoghegan <pg@bowt.ie> wrote:\n> > I wonder how much it would help to break up that loop into two loops.\n> > Make the callback into a batch operation that generates state that\n> > describes what to do with each and every index tuple on the leaf page.\n> > The first loop would build a list of TIDs, then you'd call into\n> > vacuumlazy.c and get it to process the TIDs, and finally the second\n> > loop would physically delete the TIDs that need to be deleted. This\n> > would mean that there would be only one call per leaf page per\n> > btbulkdelete(). This would reduce the number of calls to the callback\n> > by at least 100x, and maybe more than 1000x.\n>\n> Maybe for something like rtbm.c (which is inspired by Roaring\n> bitmaps), you would really want to use an \"intersection\" operation for\n> this. The TIDs that we need to physically delete from the leaf page\n> inside btvacuumpage() are the intersection of two bitmaps: our bitmap\n> of all TIDs on the leaf page, and our bitmap of all TIDs that need to\n> be deleting by the ongoing btbulkdelete() call.\n\nAgreed. In such a batch operation, what we need to do here is to\ncompute the intersection of two bitmaps.\n\n>\n> Obviously the typical case is that most TIDs in the index do *not* get\n> deleted -- needing to delete more than ~20% of all TIDs in the index\n> will be rare. Ideally it would be very cheap to figure out that a TID\n> does not need to be deleted at all. Something a little like a negative\n> cache (but not a true negative cache). 
This is a little bit like how\n> hash joins can be made faster by adding a Bloom filter -- most hash\n> probes don't need to join a tuple in the real world, and we can make\n> these hash probes even faster by using a Bloom filter as a negative\n> cache.\n\nAgreed.\n\n>\n> If you had the list of TIDs from a leaf page sorted for batch\n> processing, and if you had roaring bitmap style \"chunks\" with\n> \"container\" metadata stored in the data structure, you could then use\n> merging/intersection -- that has some of the same advantages. I think\n> that this would be a lot more efficient than having one binary search\n> per TID. Most TIDs from the leaf page can be skipped over very\n> quickly, in large groups. It's very rare for VACUUM to need to delete\n> TIDs from completely random heap table blocks in the real world (some\n> kind of pattern is much more common).\n>\n> When this merging process finds 1 TID that might really be deletable\n> then it's probably going to find much more than 1 -- better to make\n> that cache miss take care of all of the TIDs together. Also seems like\n> the CPU could do some clever prefetching with this approach -- it\n> could prefetch TIDs where the initial chunk metadata is insufficient\n> to eliminate them early -- these are the groups of TIDs that will have\n> many TIDs that we actually need to delete. ISTM that improving\n> temporal locality through batching could matter a lot here.\n\nThat's a promising approach.\n\nIn rtbm, the pair of one hash entry and one container is used per\nblock. Therefore, we can skip TID from the leaf page by checking the\nhash table, if there is no dead tuple in the block. 
If there is the\nhash entry, since it means the block has at least one dead tuple, we\ncan look for the offset of TID from the leaf page from the container.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Fri, 9 Jul 2021 17:30:24 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 8, 2021 at 10:40 PM Hannu Krosing <hannuk@google.com> wrote:\n>\n> Very nice results.\n>\n> I have been working on the same problem but a bit different solution -\n> a mix of binary search for (sub)pages and 32-bit bitmaps for\n> tid-in-page.\n>\n> Even with currebnt allocation heuristics (allocate 291 tids per page)\n> it initially allocate much less space, instead of current 291*6=1746\n> bytes per page it needs to allocate 80 bytes.\n>\n> Also it can be laid out so that it is friendly to parallel SIMD\n> searches doing up to 8 tid lookups in parallel.\n\nInteresting.\n\n>\n> That said, for allocating the tid array, the best solution is to\n> postpone it as much as possible and to do the initial collection into\n> a file, which\n>\n> 1) postpones the memory allocation to the beginning of index cleanups\n>\n> 2) lets you select the correct size and structure as you know more\n> about the distribution at that time\n>\n> 3) do the first heap pass in one go and then advance frozenxmin\n> *before* index cleanup\n\nI think we have to do index vacuuming before heap vacuuming (2nd heap\npass). 
So do you mean that it advances relfrozenxid of pg_class before\nboth index vacuuming and heap vacuuming?\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Fri, 9 Jul 2021 17:36:31 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2021-07-07 20:46:38 +0900, Masahiko Sawada wrote:\n> Currently, the TIDs of dead tuples are stored in an array that is\n> collectively allocated at the start of lazy vacuum and TID lookup uses\n> bsearch(). There are the following challenges and limitations:\n\n> So I prototyped a new data structure dedicated to storing dead tuples\n> during lazy vacuum while borrowing the idea from Roaring Bitmap[2].\n> The authors provide an implementation of Roaring Bitmap[3] (Apache\n> 2.0 license). But I've implemented this idea from scratch because we\n> need to integrate it with Dynamic Shared Memory/Area to support\n> parallel vacuum and need to support ItemPointerData, 6-bytes integer\n> in total, whereas the implementation supports only 4-bytes integers.\n> Also, when it comes to vacuum, we neither need to compute the\n> intersection, the union, nor the difference between sets, but need\n> only an existence check.\n> \n> The data structure is somewhat similar to TIDBitmap. It consists of\n> the hash table and the container area; the hash table has entries per\n> block and each block entry allocates its memory space, called a\n> container, in the container area to store its offset numbers. The\n> container area is actually an array of bytes and can be enlarged as\n> needed. In the container area, the data representation of offset\n> numbers varies depending on their cardinality. It has three container\n> types: array, bitmap, and run.\n\nHow are you thinking of implementing iteration efficiently for rtbm? The\nsecond heap pass needs that obviously... 
I think the only option would\nbe to qsort the whole thing?\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Fri, 9 Jul 2021 10:17:49 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2021-07-09 10:17:49 -0700, Andres Freund wrote:\n> On 2021-07-07 20:46:38 +0900, Masahiko Sawada wrote:\n> > Currently, the TIDs of dead tuples are stored in an array that is\n> > collectively allocated at the start of lazy vacuum and TID lookup uses\n> > bsearch(). There are the following challenges and limitations:\n> \n> > So I prototyped a new data structure dedicated to storing dead tuples\n> > during lazy vacuum while borrowing the idea from Roaring Bitmap[2].\n> > The authors provide an implementation of Roaring Bitmap[3] (Apache\n> > 2.0 license). But I've implemented this idea from scratch because we\n> > need to integrate it with Dynamic Shared Memory/Area to support\n> > parallel vacuum and need to support ItemPointerData, 6-bytes integer\n> > in total, whereas the implementation supports only 4-bytes integers.\n> > Also, when it comes to vacuum, we neither need to compute the\n> > intersection, the union, nor the difference between sets, but need\n> > only an existence check.\n> > \n> > The data structure is somewhat similar to TIDBitmap. It consists of\n> > the hash table and the container area; the hash table has entries per\n> > block and each block entry allocates its memory space, called a\n> > container, in the container area to store its offset numbers. The\n> > container area is actually an array of bytes and can be enlarged as\n> > needed. In the container area, the data representation of offset\n> > numbers varies depending on their cardinality. It has three container\n> > types: array, bitmap, and run.\n> \n> How are you thinking of implementing iteration efficiently for rtbm? The\n> second heap pass needs that obviously... 
I think the only option would\n> be to qsort the whole thing?\n\nI experimented further, trying to use an old radix tree implementation I\nhad lying around to store dead tuples. With a bit of trickery that seems\nto work well.\n\nThe radix tree implementation I have basically maps an int64 to another\nint64. Each level of the radix tree stores 6 bits of the key, and uses\nthose 6 bits to index a 1<<64 long array leading to the next level.\n\nMy first idea was to use itemptr_encode() to convert tids into an int64\nand store the lower 6 bits in the value part of the radix tree. That\nturned out to work well performance wise, but awfully memory usage\nwise. The problem is that we at most use 9 bits for offsets, but reserve\n16 bits for it in the ItemPointerData. Which means that there's often a\nlot of empty \"tree levels\" for those 0 bits, making it hard to get to a\ndecent memory usage.\n\nThe simplest way to address that was to simply compress out those\nguaranteed-to-be-zero bits. That results in memory usage that's quite\ngood - nearly always beating array, occasionally beating rtbm. It's an\nordered datastructure, so the latter isn't too surprising. For lookup\nperformance the radix approach is commonly among the best, if not the\nbest.\n\nA variation of the storage approach is to just use the block number as\nthe index, and store the tids as the value. Even with the absolutely\nnaive approach of just using a Bitmapset that reduces memory usage\nsubstantially - at a small cost to search performance. Of course it'd be\nbetter to use an adaptive approach like you did for rtbm, I just thought\nthis is good enough.\n\n\nThis largely works well, except when there are a large number of evenly\nspread out dead tuples. I don't think that's a particularly common\nsituation, but it's worth considering anyway.\n\n\nThe reason the memory usage can be larger for sparse workloads obviously\ncan lead to tree nodes with only one child. 
As they are quite large\n(1<<6 pointers to further children) that then can lead to large increase\nin memory usage.\n\nI have toyed with implementing adaptively large radix nodes like\nproposed in https://db.in.tum.de/~leis/papers/ART.pdf - but haven't\ngotten it quite working.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Fri, 9 Jul 2021 19:55:43 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sat, Jul 10, 2021 at 2:17 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2021-07-07 20:46:38 +0900, Masahiko Sawada wrote:\n> > Currently, the TIDs of dead tuples are stored in an array that is\n> > collectively allocated at the start of lazy vacuum and TID lookup uses\n> > bsearch(). There are the following challenges and limitations:\n>\n> > So I prototyped a new data structure dedicated to storing dead tuples\n> > during lazy vacuum while borrowing the idea from Roaring Bitmap[2].\n> > The authors provide an implementation of Roaring Bitmap[3] (Apache\n> > 2.0 license). But I've implemented this idea from scratch because we\n> > need to integrate it with Dynamic Shared Memory/Area to support\n> > parallel vacuum and need to support ItemPointerData, 6-bytes integer\n> > in total, whereas the implementation supports only 4-bytes integers.\n> > Also, when it comes to vacuum, we neither need to compute the\n> > intersection, the union, nor the difference between sets, but need\n> > only an existence check.\n> >\n> > The data structure is somewhat similar to TIDBitmap. It consists of\n> > the hash table and the container area; the hash table has entries per\n> > block and each block entry allocates its memory space, called a\n> > container, in the container area to store its offset numbers. The\n> > container area is actually an array of bytes and can be enlarged as\n> > needed. 
In the container area, the data representation of offset\n> > numbers varies depending on their cardinality. It has three container\n> > types: array, bitmap, and run.\n>\n> How are you thinking of implementing iteration efficiently for rtbm? The\n> second heap pass needs that obviously... I think the only option would\n> be to qsort the whole thing?\n\nYes, I'm thinking that the iteration of rtbm is somewhat similar to\ntbm. That is, we iterate and collect hash table entries and do qsort\nhash entries by the block number. Then fetch the entry along with its\ncontainer one by one in order of the block number.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Sat, 10 Jul 2021 21:11:20 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Sorry for the late reply.\n\nOn Sat, Jul 10, 2021 at 11:55 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2021-07-09 10:17:49 -0700, Andres Freund wrote:\n> > On 2021-07-07 20:46:38 +0900, Masahiko Sawada wrote:\n> > > Currently, the TIDs of dead tuples are stored in an array that is\n> > > collectively allocated at the start of lazy vacuum and TID lookup uses\n> > > bsearch(). There are the following challenges and limitations:\n> >\n> > > So I prototyped a new data structure dedicated to storing dead tuples\n> > > during lazy vacuum while borrowing the idea from Roaring Bitmap[2].\n> > > The authors provide an implementation of Roaring Bitmap[3] (Apache\n> > > 2.0 license). 
But I've implemented this idea from scratch because we\n> > > need to integrate it with Dynamic Shared Memory/Area to support\n> > > parallel vacuum and need to support ItemPointerData, 6-bytes integer\n> > > in total, whereas the implementation supports only 4-bytes integers.\n> > > Also, when it comes to vacuum, we neither need to compute the\n> > > intersection, the union, nor the difference between sets, but need\n> > > only an existence check.\n> > >\n> > > The data structure is somewhat similar to TIDBitmap. It consists of\n> > > the hash table and the container area; the hash table has entries per\n> > > block and each block entry allocates its memory space, called a\n> > > container, in the container area to store its offset numbers. The\n> > > container area is actually an array of bytes and can be enlarged as\n> > > needed. In the container area, the data representation of offset\n> > > numbers varies depending on their cardinality. It has three container\n> > > types: array, bitmap, and run.\n> >\n> > How are you thinking of implementing iteration efficiently for rtbm? The\n> > second heap pass needs that obviously... I think the only option would\n> > be to qsort the whole thing?\n>\n> I experimented further, trying to use an old radix tree implementation I\n> had lying around to store dead tuples. With a bit of trickery that seems\n> to work well.\n\nThank you for experimenting with another approach.\n\n>\n> The radix tree implementation I have basically maps an int64 to another\n> int64. Each level of the radix tree stores 6 bits of the key, and uses\n> those 6 bits to index a 1<<64 long array leading to the next level.\n>\n> My first idea was to use itemptr_encode() to convert tids into an int64\n> and store the lower 6 bits in the value part of the radix tree. That\n> turned out to work well performance wise, but awfully memory usage\n> wise. The problem is that we at most use 9 bits for offsets, but reserve\n> 16 bits for it in the ItemPointerData. 
Which means that there's often a\n> lot of empty \"tree levels\" for those 0 bits, making it hard to get to a\n> decent memory usage.\n>\n> The simplest way to address that was to simply compress out those\n> guaranteed-to-be-zero bits. That results in memory usage that's quite\n> good - nearly always beating array, occasionally beating rtbm. It's an\n> ordered datastructure, so the latter isn't too surprising. For lookup\n> performance the radix approach is commonly among the best, if not the\n> best.\n\nHow were its both lookup performance and memory usage comparing to\nintset? I guess the performance trends of those two approaches are\nsimilar since both consists of a tree. Intset encodes uint64 by\nsimple-8B encoding so I'm interested also in the comparison in terms\nof memory usage.\n\n>\n> A variation of the storage approach is to just use the block number as\n> the index, and store the tids as the value. Even with the absolutely\n> naive approach of just using a Bitmapset that reduces memory usage\n> substantially - at a small cost to search performance. Of course it'd be\n> better to use an adaptive approach like you did for rtbm, I just thought\n> this is good enough.\n>\n>\n> This largely works well, except when there are a large number of evenly\n> spread out dead tuples. I don't think that's a particularly common\n> situation, but it's worth considering anyway.\n>\n> The reason the memory usage can be larger for sparse workloads obviously\n> can lead to tree nodes with only one child. As they are quite large\n> (1<<6 pointers to further children) that then can lead to large increase\n> in memory usage.\n\nInteresting. How big was it in such workloads comparing to other data\nstructures?\n\nI personally like adaptive approaches especially in the context of\nvacuum improvements. 
We know common patterns of dead tuple\ndistribution but it’s not necessarily true since it depends on data\ndistribution and timings of autovacuum etc even with the same\nworkload. And we might be able to provide a new approach that works\nwell in 95% of use cases but if things get worse than before in\nanother 5% I think the approach is not a good approach. Ideally, it\nshould be better in common cases and at least be the same as before in\nother cases.\n\nBTW is the implementation of the radix tree approach available\nsomewhere? If so I'd like to experiment with that too.\n\n>\n> I have toyed with implementing adaptively large radix nodes like\n> proposed in https://db.in.tum.de/~leis/papers/ART.pdf - but haven't\n> gotten it quite working.\n\nThat seems promising approach.\n\nRegards,\n\n[1] https://www.postgresql.org/message-id/CA%2BTgmoakKFXwUv1Cx2mspUuPQHzYF74BfJ8koF5YdgVLCvhpwA%40mail.gmail.com\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Mon, 19 Jul 2021 15:20:54 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2021-07-19 15:20:54 +0900, Masahiko Sawada wrote:\n> BTW is the implementation of the radix tree approach available\n> somewhere? If so I'd like to experiment with that too.\n>\n> >\n> > I have toyed with implementing adaptively large radix nodes like\n> > proposed in https://db.in.tum.de/~leis/papers/ART.pdf - but haven't\n> > gotten it quite working.\n>\n> That seems promising approach.\n\nI've since implemented some, but not all of the ideas of that paper\n(adaptive node sizes, but not the tree compression pieces).\n\nE.g. 
for\n\nselect prepare(\n1000000, -- max block\n20, -- # of dead tuples per page\n10, -- dead tuples interval within a page\n1 -- page inteval\n);\n attach size shuffled\tordered\narray 69 ms 120 MB 84.87 s 8.66 s\nintset 173 ms 65 MB 68.82 s 11.75 s\nrtbm 201 ms 67 MB 11.54 s 1.35 s\ntbm 232 ms 100 MB 8.33 s 1.26 s\nvtbm 162 ms 58 MB 10.01 s 1.22 s\nradix 88 ms 42 MB 11.49 s 1.67 s\n\nand for\nselect prepare(\n1000000, -- max block\n10, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n1 -- page inteval\n);\n\n attach size shuffled\tordered\narray 24 ms 60MB 3.74s 1.02 s\nintset 97 ms 49MB 3.14s 0.75 s\nrtbm 138 ms 36MB 0.41s 0.14 s\ntbm 198 ms 101MB 0.41s 0.14 s\nvtbm 118 ms 27MB 0.39s 0.12 s\nradix 33 ms 10MB 0.28s 0.10 s\n\n(this is an almost unfairly good case for radix)\n\nRunning out of time to format the results of the other testcases before\nI have to run, unfortunately. radix uses 42MB both in test case 3 and\n4.\n\n\nThe radix tree code isn't good right now. A ridiculous amount of\nduplication etc. The naming clearly shows its origins from a buffer\nmapping radix tree...\n\n\nCurrently in a bunch of the cases 20% of the time is spent in\nradix_reaped(). If I move that into radix.c and for bfm_lookup() to be\ninlined, I get reduced overhead. rbtm for example essentially already\ndoes that, because it does splitting of ItemPointer in rtbm.c.\n\n\nI've attached my current patches against your tree.\n\nGreetings,\n\nAndres Freund", "msg_date": "Mon, 19 Jul 2021 16:49:15 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2021-07-19 16:49:15 -0700, Andres Freund wrote:\n> E.g. 
for\n> \n> select prepare(\n> 1000000, -- max block\n> 20, -- # of dead tuples per page\n> 10, -- dead tuples interval within a page\n> 1 -- page inteval\n> );\n> attach size shuffled\tordered\n> array 69 ms 120 MB 84.87 s 8.66 s\n> intset 173 ms 65 MB 68.82 s 11.75 s\n> rtbm 201 ms 67 MB 11.54 s 1.35 s\n> tbm 232 ms 100 MB 8.33 s 1.26 s\n> vtbm 162 ms 58 MB 10.01 s 1.22 s\n> radix 88 ms 42 MB 11.49 s 1.67 s\n> \n> and for\n> select prepare(\n> 1000000, -- max block\n> 10, -- # of dead tuples per page\n> 1, -- dead tuples interval within a page\n> 1 -- page inteval\n> );\n> \n> attach size shuffled\tordered\n> array 24 ms 60MB 3.74s 1.02 s\n> intset 97 ms 49MB 3.14s 0.75 s\n> rtbm 138 ms 36MB 0.41s 0.14 s\n> tbm 198 ms 101MB 0.41s 0.14 s\n> vtbm 118 ms 27MB 0.39s 0.12 s\n> radix 33 ms 10MB 0.28s 0.10 s\n\nOh, I forgot: The performance numbers are with the fixes in\nhttps://www.postgresql.org/message-id/20210717194333.mr5io3zup3kxahfm%40alap3.anarazel.de\napplied.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Mon, 19 Jul 2021 17:00:12 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nI've dreamed to write more compact structure for vacuum for three\nyears, but life didn't give me a time to.\n\nLet me join to friendly competition.\n\nI've bet on HATM approach: popcount-ing bitmaps for non-empty elements.\n\nNovelties:\n- 32 consecutive pages are stored together in a single sparse array\n (called \"chunks\").\n Chunk contains:\n - its number,\n - 4 byte bitmap of non-empty pages,\n - array of non-empty page headers 2 byte each.\n Page header contains offset of page's bitmap in bitmaps container.\n (Except if there is just one dead tuple in a page. 
Then it is\n written into header itself).\n - container of concatenated bitmaps.\n\n Ie, page metadata overhead varies from 2.4byte (32pages in single \nchunk)\n to 18byte (1 page in single chunk) per page.\n\n- If page's bitmap is sparse ie contains a lot of \"all-zero\" bytes,\n it is compressed by removing zero byte and indexing with two-level\n bitmap index.\n Two-level index - zero bytes in first level are removed using\n second level. It is mostly done for 32kb pages, but let it stay since\n it is almost free.\n\n- If page's bitmaps contains a lot of \"all-one\" bytes, it is inverted\n and then encoded as sparse.\n\n- Chunks are allocated with custom \"allocator\" that has no\n per-allocation overhead. It is possible because there is no need\n to perform \"free\": allocator is freed as whole at once.\n\n- Array of pointers to chunks is also bitmap indexed. It saves cpu time\n when not every 32 consecutive pages has at least one dead tuple.\n But consumes time otherwise. Therefore additional optimization is \nadded\n to quick skip lookup for first non-empty run of chunks.\n (Ahhh, I believe this explanation is awful).\n\nAndres Freund wrote 2021-07-20 02:49:\n> Hi,\n> \n> On 2021-07-19 15:20:54 +0900, Masahiko Sawada wrote:\n>> BTW is the implementation of the radix tree approach available\n>> somewhere? If so I'd like to experiment with that too.\n>> \n>> >\n>> > I have toyed with implementing adaptively large radix nodes like\n>> > proposed in https://db.in.tum.de/~leis/papers/ART.pdf - but haven't\n>> > gotten it quite working.\n>> \n>> That seems promising approach.\n> \n> I've since implemented some, but not all of the ideas of that paper\n> (adaptive node sizes, but not the tree compression pieces).\n> \n> E.g. 
for\n> \n> select prepare(\n> 1000000, -- max block\n> 20, -- # of dead tuples per page\n> 10, -- dead tuples interval within a page\n> 1 -- page inteval\n> );\n> attach size shuffled\tordered\n> array 69 ms 120 MB 84.87 s 8.66 s\n> intset 173 ms 65 MB 68.82 s 11.75 s\n> rtbm 201 ms 67 MB 11.54 s 1.35 s\n> tbm 232 ms 100 MB 8.33 s 1.26 s\n> vtbm 162 ms 58 MB 10.01 s 1.22 s\n> radix 88 ms 42 MB 11.49 s 1.67 s\n> \n> and for\n> select prepare(\n> 1000000, -- max block\n> 10, -- # of dead tuples per page\n> 1, -- dead tuples interval within a page\n> 1 -- page inteval\n> );\n> \n> attach size shuffled\tordered\n> array 24 ms 60MB 3.74s 1.02 s\n> intset 97 ms 49MB 3.14s 0.75 s\n> rtbm 138 ms 36MB 0.41s 0.14 s\n> tbm 198 ms 101MB 0.41s 0.14 s\n> vtbm 118 ms 27MB 0.39s 0.12 s\n> radix 33 ms 10MB 0.28s 0.10 s\n> \n> (this is an almost unfairly good case for radix)\n> \n> Running out of time to format the results of the other testcases before\n> I have to run, unfortunately. radix uses 42MB both in test case 3 and\n> 4.\n\nMy results (Ubuntu 20.04 Intel Core i7-1165G7):\n\nTest1.\n\nselect prepare(1000000, 10, 20, 1); -- original\n\n attach size shuffled\narray 29ms 60MB 93.99s\nintset 93ms 49MB 80.94s\nrtbm 171ms 67MB 14.05s\ntbm 238ms 100MB 8.36s\nvtbm 148ms 59MB 9.12s\nradix 100ms 42MB 11.81s\nsvtm 75ms 29MB 8.90s\n\nselect prepare(1000000, 20, 10, 1); -- Andres's variant\n\n attach size shuffled\narray 61ms 120MB 111.91s\nintset 163ms 66MB 85.00s\nrtbm 236ms 67MB 10.72s\ntbm 290ms 100MB 8.40s\nvtbm 190ms 59MB 9.28s\nradix 117ms 42MB 12.00s\nsvtm 98ms 29MB 8.77s\n\nTest2.\n\nselect prepare(1000000, 10, 1, 1);\n\n attach size shuffled\narray 31ms 60MB 4.68s\nintset 97ms 49MB 4.03s\nrtbm 163ms 36MB 0.42s\ntbm 240ms 100MB 0.42s\nvtbm 136ms 27MB 0.36s\nradix 60ms 10MB 0.72s\nsvtm 39ms 6MB 0.19s\n\n(Bad radix result probably due to smaller cache in notebook's CPU ?)\n\nTest3\n\nselect prepare(1000000, 2, 100, 1);\n\n attach size shuffled\narray 6ms 12MB 53.42s\nintset 23ms 
16MB 54.99s\nrtbm 115ms 38MB 8.19s\ntbm 186ms 100MB 8.37s\nvtbm 105ms 59MB 9.08s\nradix 64ms 42MB 10.41s\nsvtm 73ms 10MB 7.49s\n\nTest4\n\nselect prepare(1000000, 100, 1, 1);\n\n attach size shuffled\narray 304ms 600MB 75.12s\nintset 775ms 98MB 47.49s\nrtbm 356ms 38MB 4.11s\ntbm 539ms 100MB 4.20s\nvtbm 493ms 42MB 4.44s\nradix 263ms 42MB 6.05s\nsvtm 360ms 8MB 3.49s\n\nTherefore Specialized Vaccum Tid Map always consumes least memory amount\nand usually faster.\n\n\n(I've applied Andres's patch for slab allocator before testing)\n\nAttached patch is against 6753911a444e12e4b55 commit of your pgtools \nwith\napplied Andres's patches for radix method.\n\nI've also pushed it to github:\nhttps://github.com/funny-falcon/pgtools/tree/svtm/bdbench\n\nregards,\nYura Sokolov", "msg_date": "Sun, 25 Jul 2021 19:07:18 +0300", "msg_from": "Yura Sokolov <y.sokolov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jul 26, 2021 at 1:07 AM Yura Sokolov <y.sokolov@postgrespro.ru> wrote:\n>\n> Hi,\n>\n> I've dreamed to write more compact structure for vacuum for three\n> years, but life didn't give me a time to.\n>\n> Let me join to friendly competition.\n>\n> I've bet on HATM approach: popcount-ing bitmaps for non-empty elements.\n\nThank you for proposing the new idea!\n\n>\n> Novelties:\n> - 32 consecutive pages are stored together in a single sparse array\n> (called \"chunks\").\n> Chunk contains:\n> - its number,\n> - 4 byte bitmap of non-empty pages,\n> - array of non-empty page headers 2 byte each.\n> Page header contains offset of page's bitmap in bitmaps container.\n> (Except if there is just one dead tuple in a page. 
Then it is\n> written into header itself).\n> - container of concatenated bitmaps.\n>\n> Ie, page metadata overhead varies from 2.4byte (32pages in single\n> chunk)\n> to 18byte (1 page in single chunk) per page.\n>\n> - If page's bitmap is sparse ie contains a lot of \"all-zero\" bytes,\n> it is compressed by removing zero byte and indexing with two-level\n> bitmap index.\n> Two-level index - zero bytes in first level are removed using\n> second level. It is mostly done for 32kb pages, but let it stay since\n> it is almost free.\n>\n> - If page's bitmaps contains a lot of \"all-one\" bytes, it is inverted\n> and then encoded as sparse.\n>\n> - Chunks are allocated with custom \"allocator\" that has no\n> per-allocation overhead. It is possible because there is no need\n> to perform \"free\": allocator is freed as whole at once.\n>\n> - Array of pointers to chunks is also bitmap indexed. It saves cpu time\n> when not every 32 consecutive pages has at least one dead tuple.\n> But consumes time otherwise. Therefore additional optimization is\n> added\n> to quick skip lookup for first non-empty run of chunks.\n> (Ahhh, I believe this explanation is awful).\n\nIt sounds better than my proposal.\n\n>\n> Andres Freund wrote 2021-07-20 02:49:\n> > Hi,\n> >\n> > On 2021-07-19 15:20:54 +0900, Masahiko Sawada wrote:\n> >> BTW is the implementation of the radix tree approach available\n> >> somewhere? If so I'd like to experiment with that too.\n> >>\n> >> >\n> >> > I have toyed with implementing adaptively large radix nodes like\n> >> > proposed in https://db.in.tum.de/~leis/papers/ART.pdf - but haven't\n> >> > gotten it quite working.\n> >>\n> >> That seems promising approach.\n> >\n> > I've since implemented some, but not all of the ideas of that paper\n> > (adaptive node sizes, but not the tree compression pieces).\n> >\n> > E.g. 
for\n> >\n> > select prepare(\n> > 1000000, -- max block\n> > 20, -- # of dead tuples per page\n> > 10, -- dead tuples interval within a page\n> > 1 -- page inteval\n> > );\n> > attach size shuffled ordered\n> > array 69 ms 120 MB 84.87 s 8.66 s\n> > intset 173 ms 65 MB 68.82 s 11.75 s\n> > rtbm 201 ms 67 MB 11.54 s 1.35 s\n> > tbm 232 ms 100 MB 8.33 s 1.26 s\n> > vtbm 162 ms 58 MB 10.01 s 1.22 s\n> > radix 88 ms 42 MB 11.49 s 1.67 s\n> >\n> > and for\n> > select prepare(\n> > 1000000, -- max block\n> > 10, -- # of dead tuples per page\n> > 1, -- dead tuples interval within a page\n> > 1 -- page inteval\n> > );\n> >\n> > attach size shuffled ordered\n> > array 24 ms 60MB 3.74s 1.02 s\n> > intset 97 ms 49MB 3.14s 0.75 s\n> > rtbm 138 ms 36MB 0.41s 0.14 s\n> > tbm 198 ms 101MB 0.41s 0.14 s\n> > vtbm 118 ms 27MB 0.39s 0.12 s\n> > radix 33 ms 10MB 0.28s 0.10 s\n> >\n> > (this is an almost unfairly good case for radix)\n> >\n> > Running out of time to format the results of the other testcases before\n> > I have to run, unfortunately. 
radix uses 42MB both in test case 3 and\n> > 4.\n>\n> My results (Ubuntu 20.04 Intel Core i7-1165G7):\n>\n> Test1.\n>\n> select prepare(1000000, 10, 20, 1); -- original\n>\n> attach size shuffled\n> array 29ms 60MB 93.99s\n> intset 93ms 49MB 80.94s\n> rtbm 171ms 67MB 14.05s\n> tbm 238ms 100MB 8.36s\n> vtbm 148ms 59MB 9.12s\n> radix 100ms 42MB 11.81s\n> svtm 75ms 29MB 8.90s\n>\n> select prepare(1000000, 20, 10, 1); -- Andres's variant\n>\n> attach size shuffled\n> array 61ms 120MB 111.91s\n> intset 163ms 66MB 85.00s\n> rtbm 236ms 67MB 10.72s\n> tbm 290ms 100MB 8.40s\n> vtbm 190ms 59MB 9.28s\n> radix 117ms 42MB 12.00s\n> svtm 98ms 29MB 8.77s\n>\n> Test2.\n>\n> select prepare(1000000, 10, 1, 1);\n>\n> attach size shuffled\n> array 31ms 60MB 4.68s\n> intset 97ms 49MB 4.03s\n> rtbm 163ms 36MB 0.42s\n> tbm 240ms 100MB 0.42s\n> vtbm 136ms 27MB 0.36s\n> radix 60ms 10MB 0.72s\n> svtm 39ms 6MB 0.19s\n>\n> (Bad radix result probably due to smaller cache in notebook's CPU ?)\n>\n> Test3\n>\n> select prepare(1000000, 2, 100, 1);\n>\n> attach size shuffled\n> array 6ms 12MB 53.42s\n> intset 23ms 16MB 54.99s\n> rtbm 115ms 38MB 8.19s\n> tbm 186ms 100MB 8.37s\n> vtbm 105ms 59MB 9.08s\n> radix 64ms 42MB 10.41s\n> svtm 73ms 10MB 7.49s\n>\n> Test4\n>\n> select prepare(1000000, 100, 1, 1);\n>\n> attach size shuffled\n> array 304ms 600MB 75.12s\n> intset 775ms 98MB 47.49s\n> rtbm 356ms 38MB 4.11s\n> tbm 539ms 100MB 4.20s\n> vtbm 493ms 42MB 4.44s\n> radix 263ms 42MB 6.05s\n> svtm 360ms 8MB 3.49s\n>\n> Therefore Specialized Vaccum Tid Map always consumes least memory amount\n> and usually faster.\n\nI'll experiment with the proposed ideas including this idea in more\nscenarios and share the results tomorrow.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Mon, 26 Jul 2021 23:01:46 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { 
"msg_contents": "On Mon, Jul 26, 2021 at 11:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I'll experiment with the proposed ideas including this idea in more\n> scenarios and share the results tomorrow.\n>\n\nI've done some benchmarks for proposed data structures. In this trial,\nI've done with the scenario where dead tuples are concentrated on a\nparticular range of table blocks (test 5-8), in addition to the\nscenarios I've done in the previous trial. Also, I've done benchmarks\nof each scenario while increasing table size. In the first test, the\nmaximum block number of the table is 1,000,000 (i.g., 8GB table) and\nin the second test, it's 10,000,000 (80GB table). We can see how\nperformance and memory consumption changes with a large-scale table.\nHere are the results:\n\n* Test 1\nselect prepare(\n1000000, -- max block\n10, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n1, -- # of consecutive pages having dead tuples\n20 -- page interval\n);\n\n name | attach | attach | shuffled | size_x10 | attach_x10| shuffled_x10\n--------+-----------+--------+----------+------------+-----------+-------------\n array | 57.23 MB | 0.040 | 98.613 | 572.21 MB | 0.387 | 1521.981\n intset | 46.88 MB | 0.114 | 75.944 | 468.67 MB | 0.961 | 997.760\n radix | 40.26 MB | 0.102 | 18.427 | 336.64 MB | 0.797 | 266.146\n rtbm | 64.02 MB | 0.234 | 22.443 | 512.02 MB | 2.230 | 275.143\n svtm | 27.28 MB | 0.060 | 13.568 | 274.07 MB | 0.476 | 211.073\n tbm | 96.01 MB | 0.273 | 10.347 | 768.01 MB | 2.882 | 128.103\n\n* Test 2\nselect prepare(\n1000000, -- max block\n10, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n1, -- # of consecutive pages having dead tuples\n1 -- page interval\n);\n\n name | attach | attach | shuffled | size_x10 | attach_x10| shuffled_x10\n--------+-----------+--------+----------+------------+-----------+-------------\n array | 57.23 MB | 0.041 | 4.757 | 572.21 MB | 0.344 | 71.228\n intset | 46.88 MB | 0.127 
| 3.762 | 468.67 MB | 1.093 | 49.573\n radix | 9.95 MB | 0.048 | 0.679 | 82.57 MB | 0.371 | 16.211\n rtbm | 34.02 MB | 0.179 | 0.534 | 288.02 MB | 2.092 | 8.693\n svtm | 5.78 MB | 0.043 | 0.239 | 54.60 MB | 0.342 | 7.759\n tbm | 96.01 MB | 0.274 | 0.521 | 768.01 MB | 2.685 | 6.360\n\n* Test 3\nselect prepare(\n1000000, -- max block\n2, -- # of dead tuples per page\n100, -- dead tuples interval within a page\n1, -- # of consecutive pages having dead tuples\n1 -- page interval\n);\n\n name | attach | attach | shuffled | size_x10 | attach_x10| shuffled_x10\n--------+-----------+--------+----------+------------+-----------+-------------\n array | 11.45 MB | 0.009 | 57.698 | 114.45 MB | 0.076 | 1045.639\n intset | 15.63 MB | 0.031 | 46.083 | 156.23 MB | 0.243 | 848.525\n radix | 40.26 MB | 0.063 | 13.755 | 336.64 MB | 0.501 | 223.413\n rtbm | 36.02 MB | 0.123 | 11.527 | 320.02 MB | 1.843 | 180.977\n svtm | 9.28 MB | 0.053 | 9.631 | 92.59 MB | 0.438 | 212.626\n tbm | 96.01 MB | 0.228 | 10.381 | 768.01 MB | 2.258 | 126.630\n\n* Test 4\nselect prepare(\n1000000, -- max block\n100, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n1, -- # of consecutive pages having dead tuples\n1 -- page interval\n);\n\n name | attach | attach | shuffled | size_x10 | attach_x10| shuffled_x10\n--------+-----------+--------+----------+------------+-----------+-------------\n array | 572.21 MB | 0.367 | 78.047 | 5722.05 MB | 3.942 | 1154.776\n intset | 93.74 MB | 0.777 | 45.146 | 937.34 MB | 7.716 | 643.708\n radix | 40.26 MB | 0.203 | 9.015 | 336.64 MB | 1.775 | 133.294\n rtbm | 36.02 MB | 0.369 | 5.639 | 320.02 MB | 3.823 | 88.832\n svtm | 7.28 MB | 0.294 | 3.891 | 73.60 MB | 2.690 | 103.744\n tbm | 96.01 MB | 0.534 | 5.223 | 768.01 MB | 5.679 | 60.632\n\n\n* Test 5\nselect prepare(\n1000000, -- max block\n150, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n10000, -- # of consecutive pages having dead tuples\n20000 -- page interval\n);\n\nThere 
are 10000 consecutive pages that have 150 dead tuples at every\n20000 pages.\n\n name | attach | attach | shuffled | size_x10 | attach_x10| shuffled_x10\n--------+-----------+--------+----------+------------+-----------+-------------\n array | 429.16 MB | 0.274 | 75.664 | 4291.54 MB | 3.067 | 1259.501\n intset | 46.88 MB | 0.559 | 36.449 | 468.67 MB | 4.565 | 517.445\n radix | 20.26 MB | 0.166 | 8.466 | 196.90 MB | 1.273 | 166.587\n rtbm | 18.02 MB | 0.242 | 8.491 | 160.02 MB | 2.407 | 171.725\n svtm | 3.66 MB | 0.243 | 3.635 | 37.10 MB | 2.022 | 86.165\n tbm | 48.01 MB | 0.344 | 9.763 | 384.01 MB | 3.327 | 151.824\n\n* Test 6\nselect prepare(\n1000000, -- max block\n10, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n10000, -- # of consecutive pages having dead tuples\n20000 -- page interval\n);\n\nThere are 10000 consecutive pages that have 10 dead tuples at every 20000 pages.\n\n name | attach | attach | shuffled | size_x10 | attach_x10| shuffled_x10\n--------+-----------+--------+----------+------------+-----------+-------------\n array | 28.62 MB | 0.022 | 2.791 | 286.11 MB | 0.170 | 46.920\n intset | 23.45 MB | 0.061 | 2.156 | 234.34 MB | 0.501 | 32.577\n radix | 5.04 MB | 0.026 | 0.433 | 48.57 MB | 0.191 | 11.060\n rtbm | 17.02 MB | 0.074 | 0.533 | 144.02 MB | 0.954 | 11.502\n svtm | 3.16 MB | 0.023 | 0.206 | 27.60 MB | 0.175 | 4.886\n tbm | 48.01 MB | 0.132 | 0.656 | 384.01 MB | 1.284 | 10.231\n\n* Test 7\nselect prepare(\n1000000, -- max block\n150, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n1000, -- # of consecutive pages having dead tuples\n999000 -- page interval\n);\n\nThere are pages that have 150 dead tuples at first 1000 blocks and\nlast 1000 blocks.\n\n name | attach | attach | shuffled | size_x10 | attach_x10| shuffled_x10\n--------+-----------+--------+----------+------------+-----------+-------------\n array | 1.72 MB | 0.002 | 7.507 | 17.17 MB | 0.011 | 76.510\n intset | 0.20 MB | 0.003 | 
6.742 | 1.89 MB | 0.022 | 52.122\n radix | 0.20 MB | 0.001 | 1.023 | 1.07 MB | 0.007 | 12.023\n rtbm | 0.15 MB | 0.001 | 2.637 | 0.65 MB | 0.009 | 34.528\n svtm | 0.52 MB | 0.002 | 0.721 | 0.61 MB | 0.010 | 6.434\n tbm | 0.20 MB | 0.002 | 2.733 | 1.51 MB | 0.015 | 38.538\n\n* Test 8\nselect prepare(\n1000000, -- max block\n100, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n50, -- # of consecutive pages having dead tuples\n100 -- page interval\n);\n\nThere are 50 consecutive pages that have 100 dead tuples at every 100 pages.\n\n name | attach | attach | shuffled | size_x10 | attach_x10| shuffled_x10\n--------+-----------+--------+----------+------------+-----------+-------------\n array | 286.11 MB | 0.184 | 67.233 | 2861.03 MB | 1.743 | 979.070\n intset | 46.88 MB | 0.389 | 35.176 | 468.67 MB | 3.698 | 505.322\n radix | 21.82 MB | 0.116 | 6.160 | 186.86 MB | 0.891 | 117.730\n rtbm | 18.02 MB | 0.182 | 5.909 | 160.02 MB | 1.870 | 112.550\n svtm | 4.28 MB | 0.152 | 3.213 | 37.60 MB | 1.383 | 79.073\n tbm | 48.01 MB | 0.265 | 6.673 | 384.01 MB | 2.586 | 101.327\n\nOverall, 'svtm' is faster and consumes less memory. 'radix' tree also\nhas good performance and memory usage.\n\n From these results, svtm is the best data structure among proposed\nideas for dead tuple storage used during lazy vacuum in terms of\nperformance and memory usage. I think it can support iteration by\nextracting the offset of dead tuples for each block while iterating\nchunks.\n\nApart from performance and memory usage points of view, we also need\nto consider the reusability of the code. When I started this thread, I\nthought the best data structure would be the one optimized for\nvacuum's dead tuple storage. However, if we can use a data structure\nthat can also be used in general, we can use it also for other\npurposes. Moreover, if it's too optimized for the current TID system\n(32 bits block number, 16 bits offset number, maximum block/offset\nnumber, etc.) 
it may become a blocker for future changes.\n\nIn that sense, radix tree also seems good since it can also be used in\ngist vacuum as a replacement for intset, or a replacement for hash\ntable for shared buffer as discussed before. Are there any other use\ncases? On the other hand, I’m concerned that radix tree would be an\nover-engineering in terms of vacuum's dead tuples storage since the\ndead tuple storage is static data and requires only lookup operation,\nso if we want to use radix tree as dead tuple storage, I'd like to see\nfurther use cases.\n\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Tue, 27 Jul 2021 13:06:56 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Masahiko Sawada писал 2021-07-27 07:06:\n> On Mon, Jul 26, 2021 at 11:01 PM Masahiko Sawada \n> <sawada.mshk@gmail.com> wrote:\n>> \n>> I'll experiment with the proposed ideas including this idea in more\n>> scenarios and share the results tomorrow.\n>> \n> \n> I've done some benchmarks for proposed data structures. In this trial,\n> I've done with the scenario where dead tuples are concentrated on a\n> particular range of table blocks (test 5-8), in addition to the\n> scenarios I've done in the previous trial. Also, I've done benchmarks\n> of each scenario while increasing table size. In the first test, the\n> maximum block number of the table is 1,000,000 (i.g., 8GB table) and\n> in the second test, it's 10,000,000 (80GB table). 
We can see how\n> performance and memory consumption changes with a large-scale table.\n> Here are the results:\n> \n> * Test 1\n> select prepare(\n> 1000000, -- max block\n> 10, -- # of dead tuples per page\n> 1, -- dead tuples interval within a page\n> 1, -- # of consecutive pages having dead tuples\n> 20 -- page interval\n> );\n> \n> name | attach | attach | shuffled | size_x10 | attach_x10| \n> shuffled_x10\n> --------+-----------+--------+----------+------------+-----------+-------------\n> array | 57.23 MB | 0.040 | 98.613 | 572.21 MB | 0.387 | \n> 1521.981\n> intset | 46.88 MB | 0.114 | 75.944 | 468.67 MB | 0.961 | \n> 997.760\n> radix | 40.26 MB | 0.102 | 18.427 | 336.64 MB | 0.797 | \n> 266.146\n> rtbm | 64.02 MB | 0.234 | 22.443 | 512.02 MB | 2.230 | \n> 275.143\n> svtm | 27.28 MB | 0.060 | 13.568 | 274.07 MB | 0.476 | \n> 211.073\n> tbm | 96.01 MB | 0.273 | 10.347 | 768.01 MB | 2.882 | \n> 128.103\n> \n> * Test 2\n> select prepare(\n> 1000000, -- max block\n> 10, -- # of dead tuples per page\n> 1, -- dead tuples interval within a page\n> 1, -- # of consecutive pages having dead tuples\n> 1 -- page interval\n> );\n> \n> name | attach | attach | shuffled | size_x10 | attach_x10| \n> shuffled_x10\n> --------+-----------+--------+----------+------------+-----------+-------------\n> array | 57.23 MB | 0.041 | 4.757 | 572.21 MB | 0.344 | \n> 71.228\n> intset | 46.88 MB | 0.127 | 3.762 | 468.67 MB | 1.093 | \n> 49.573\n> radix | 9.95 MB | 0.048 | 0.679 | 82.57 MB | 0.371 | \n> 16.211\n> rtbm | 34.02 MB | 0.179 | 0.534 | 288.02 MB | 2.092 | \n> 8.693\n> svtm | 5.78 MB | 0.043 | 0.239 | 54.60 MB | 0.342 | \n> 7.759\n> tbm | 96.01 MB | 0.274 | 0.521 | 768.01 MB | 2.685 | \n> 6.360\n> \n> * Test 3\n> select prepare(\n> 1000000, -- max block\n> 2, -- # of dead tuples per page\n> 100, -- dead tuples interval within a page\n> 1, -- # of consecutive pages having dead tuples\n> 1 -- page interval\n> );\n> \n> name | attach | attach | shuffled | size_x10 | attach_x10| 
\n> shuffled_x10\n> --------+-----------+--------+----------+------------+-----------+-------------\n> array | 11.45 MB | 0.009 | 57.698 | 114.45 MB | 0.076 | \n> 1045.639\n> intset | 15.63 MB | 0.031 | 46.083 | 156.23 MB | 0.243 | \n> 848.525\n> radix | 40.26 MB | 0.063 | 13.755 | 336.64 MB | 0.501 | \n> 223.413\n> rtbm | 36.02 MB | 0.123 | 11.527 | 320.02 MB | 1.843 | \n> 180.977\n> svtm | 9.28 MB | 0.053 | 9.631 | 92.59 MB | 0.438 | \n> 212.626\n> tbm | 96.01 MB | 0.228 | 10.381 | 768.01 MB | 2.258 | \n> 126.630\n> \n> * Test 4\n> select prepare(\n> 1000000, -- max block\n> 100, -- # of dead tuples per page\n> 1, -- dead tuples interval within a page\n> 1, -- # of consecutive pages having dead tuples\n> 1 -- page interval\n> );\n> \n> name | attach | attach | shuffled | size_x10 | attach_x10| \n> shuffled_x10\n> --------+-----------+--------+----------+------------+-----------+-------------\n> array | 572.21 MB | 0.367 | 78.047 | 5722.05 MB | 3.942 | \n> 1154.776\n> intset | 93.74 MB | 0.777 | 45.146 | 937.34 MB | 7.716 | \n> 643.708\n> radix | 40.26 MB | 0.203 | 9.015 | 336.64 MB | 1.775 | \n> 133.294\n> rtbm | 36.02 MB | 0.369 | 5.639 | 320.02 MB | 3.823 | \n> 88.832\n> svtm | 7.28 MB | 0.294 | 3.891 | 73.60 MB | 2.690 | \n> 103.744\n> tbm | 96.01 MB | 0.534 | 5.223 | 768.01 MB | 5.679 | \n> 60.632\n> \n> \n> * Test 5\n> select prepare(\n> 1000000, -- max block\n> 150, -- # of dead tuples per page\n> 1, -- dead tuples interval within a page\n> 10000, -- # of consecutive pages having dead tuples\n> 20000 -- page interval\n> );\n> \n> There are 10000 consecutive pages that have 150 dead tuples at every\n> 20000 pages.\n> \n> name | attach | attach | shuffled | size_x10 | attach_x10| \n> shuffled_x10\n> --------+-----------+--------+----------+------------+-----------+-------------\n> array | 429.16 MB | 0.274 | 75.664 | 4291.54 MB | 3.067 | \n> 1259.501\n> intset | 46.88 MB | 0.559 | 36.449 | 468.67 MB | 4.565 | \n> 517.445\n> radix | 20.26 MB | 0.166 | 8.466 | 
196.90 MB | 1.273 | \n> 166.587\n> rtbm | 18.02 MB | 0.242 | 8.491 | 160.02 MB | 2.407 | \n> 171.725\n> svtm | 3.66 MB | 0.243 | 3.635 | 37.10 MB | 2.022 | \n> 86.165\n> tbm | 48.01 MB | 0.344 | 9.763 | 384.01 MB | 3.327 | \n> 151.824\n> \n> * Test 6\n> select prepare(\n> 1000000, -- max block\n> 10, -- # of dead tuples per page\n> 1, -- dead tuples interval within a page\n> 10000, -- # of consecutive pages having dead tuples\n> 20000 -- page interval\n> );\n> \n> There are 10000 consecutive pages that have 10 dead tuples at every \n> 20000 pages.\n> \n> name | attach | attach | shuffled | size_x10 | attach_x10| \n> shuffled_x10\n> --------+-----------+--------+----------+------------+-----------+-------------\n> array | 28.62 MB | 0.022 | 2.791 | 286.11 MB | 0.170 | \n> 46.920\n> intset | 23.45 MB | 0.061 | 2.156 | 234.34 MB | 0.501 | \n> 32.577\n> radix | 5.04 MB | 0.026 | 0.433 | 48.57 MB | 0.191 | \n> 11.060\n> rtbm | 17.02 MB | 0.074 | 0.533 | 144.02 MB | 0.954 | \n> 11.502\n> svtm | 3.16 MB | 0.023 | 0.206 | 27.60 MB | 0.175 | \n> 4.886\n> tbm | 48.01 MB | 0.132 | 0.656 | 384.01 MB | 1.284 | \n> 10.231\n> \n> * Test 7\n> select prepare(\n> 1000000, -- max block\n> 150, -- # of dead tuples per page\n> 1, -- dead tuples interval within a page\n> 1000, -- # of consecutive pages having dead tuples\n> 999000 -- page interval\n> );\n> \n> There are pages that have 150 dead tuples at first 1000 blocks and\n> last 1000 blocks.\n> \n> name | attach | attach | shuffled | size_x10 | attach_x10| \n> shuffled_x10\n> --------+-----------+--------+----------+------------+-----------+-------------\n> array | 1.72 MB | 0.002 | 7.507 | 17.17 MB | 0.011 | \n> 76.510\n> intset | 0.20 MB | 0.003 | 6.742 | 1.89 MB | 0.022 | \n> 52.122\n> radix | 0.20 MB | 0.001 | 1.023 | 1.07 MB | 0.007 | \n> 12.023\n> rtbm | 0.15 MB | 0.001 | 2.637 | 0.65 MB | 0.009 | \n> 34.528\n> svtm | 0.52 MB | 0.002 | 0.721 | 0.61 MB | 0.010 | \n> 6.434\n> tbm | 0.20 MB | 0.002 | 2.733 | 1.51 MB | 0.015 | 
\n> 38.538\n> \n> * Test 8\n> select prepare(\n> 1000000, -- max block\n> 100, -- # of dead tuples per page\n> 1, -- dead tuples interval within a page\n> 50, -- # of consecutive pages having dead tuples\n> 100 -- page interval\n> );\n> \n> There are 50 consecutive pages that have 100 dead tuples at every 100 \n> pages.\n> \n> name | attach | attach | shuffled | size_x10 | attach_x10| \n> shuffled_x10\n> --------+-----------+--------+----------+------------+-----------+-------------\n> array | 286.11 MB | 0.184 | 67.233 | 2861.03 MB | 1.743 | \n> 979.070\n> intset | 46.88 MB | 0.389 | 35.176 | 468.67 MB | 3.698 | \n> 505.322\n> radix | 21.82 MB | 0.116 | 6.160 | 186.86 MB | 0.891 | \n> 117.730\n> rtbm | 18.02 MB | 0.182 | 5.909 | 160.02 MB | 1.870 | \n> 112.550\n> svtm | 4.28 MB | 0.152 | 3.213 | 37.60 MB | 1.383 | \n> 79.073\n> tbm | 48.01 MB | 0.265 | 6.673 | 384.01 MB | 2.586 | \n> 101.327\n> \n> Overall, 'svtm' is faster and consumes less memory. 'radix' tree also\n> has good performance and memory usage.\n> \n> From these results, svtm is the best data structure among proposed\n> ideas for dead tuple storage used during lazy vacuum in terms of\n> performance and memory usage. I think it can support iteration by\n> extracting the offset of dead tuples for each block while iterating\n> chunks.\n> \n> Apart from performance and memory usage points of view, we also need\n> to consider the reusability of the code. When I started this thread, I\n> thought the best data structure would be the one optimized for\n> vacuum's dead tuple storage. However, if we can use a data structure\n> that can also be used in general, we can use it also for other\n> purposes. Moreover, if it's too optimized for the current TID system\n> (32 bits block number, 16 bits offset number, maximum block/offset\n> number, etc.) 
it may become a blocker for future changes.\n> \n> In that sense, radix tree also seems good since it can also be used in\n> gist vacuum as a replacement for intset, or a replacement for hash\n> table for shared buffer as discussed before. Are there any other use\n> cases? On the other hand, I’m concerned that radix tree would be an\n> over-engineering in terms of vacuum's dead tuples storage since the\n> dead tuple storage is static data and requires only lookup operation,\n> so if we want to use radix tree as dead tuple storage, I'd like to see\n> further use cases.\n\nI can evolve svtm to transparent intset replacement certainly. Using\nsame trick from radix_to_key it will store tids efficiently:\n\n shift = pg_ceil_log2_32(MaxHeapTuplesPerPage);\n tid_i = ItemPointerGetOffsetNumber(tid);\n tid_i |= ItemPointerGetBlockNumber(tid) << shift;\n\nWill do today's evening.\n\nregards\nYura Sokolov aka funny_falcon\n\n\n", "msg_date": "Tue, 27 Jul 2021 09:06:24 +0300", "msg_from": "Yura Sokolov <y.sokolov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\n\nOn 2021-07-25 19:07:18 +0300, Yura Sokolov wrote:\n> I've dreamed to write more compact structure for vacuum for three\n> years, but life didn't give me a time to.\n> \n> Let me join to friendly competition.\n> \n> I've bet on HATM approach: popcount-ing bitmaps for non-empty elements.\n\nMy concern with several of the proposals in this thread is that they\nover-optimize for this specific case. It's not actually that crucial to\nhave a crazily optimized vacuum dead tid storage datatype. Having\nsomething more general that also performs reasonably for the dead tuple\nstorage, but also performs well in a number of other cases, makes a lot\nmore sense to me.\n\n\n> (Bad radix result probably due to smaller cache in notebook's CPU ?)\n\nProbably largely due to the node dispatch. 
a) For some reason gcc likes\njump tables too much, I get better numbers when disabling those b) the\nnode type dispatch should be stuffed into the low bits of the pointer.\n\n\n> select prepare(1000000, 2, 100, 1);\n> \n> attach size shuffled\n> array 6ms 12MB 53.42s\n> intset 23ms 16MB 54.99s\n> rtbm 115ms 38MB 8.19s\n> tbm 186ms 100MB 8.37s\n> vtbm 105ms 59MB 9.08s\n> radix 64ms 42MB 10.41s\n> svtm 73ms 10MB 7.49s\n\n> Test4\n> \n> select prepare(1000000, 100, 1, 1);\n> \n> attach size shuffled\n> array 304ms 600MB 75.12s\n> intset 775ms 98MB 47.49s\n> rtbm 356ms 38MB 4.11s\n> tbm 539ms 100MB 4.20s\n> vtbm 493ms 42MB 4.44s\n> radix 263ms 42MB 6.05s\n> svtm 360ms 8MB 3.49s\n> \n> Therefore Specialized Vaccum Tid Map always consumes least memory amount\n> and usually faster.\n\nImpressive.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Wed, 28 Jul 2021 11:41:39 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2021-07-27 13:06:56 +0900, Masahiko Sawada wrote:\n> Apart from performance and memory usage points of view, we also need\n> to consider the reusability of the code. When I started this thread, I\n> thought the best data structure would be the one optimized for\n> vacuum's dead tuple storage. However, if we can use a data structure\n> that can also be used in general, we can use it also for other\n> purposes. Moreover, if it's too optimized for the current TID system\n> (32 bits block number, 16 bits offset number, maximum block/offset\n> number, etc.) it may become a blocker for future changes.\n\nIndeed.\n\n\n> In that sense, radix tree also seems good since it can also be used in\n> gist vacuum as a replacement for intset, or a replacement for hash\n> table for shared buffer as discussed before. Are there any other use\n> cases?\n\nYes, I think there are. 
Whenever there is some spatial locality it has a\ndecent chance of winning over a hash table, and it will most of the time\nwin over ordered datastructures like rbtrees (which perform very poorly\ndue to the number of branches and pointer dispatches). There's plenty\nhashtables, e.g. for caches, locks, etc, in PG that have a medium-high\ndegree of locality, so I'd expect a few potential uses. When adding\n\"tree compression\" (i.e. skip inner nodes that have a single incoming &\noutgoing node) radix trees even can deal quite performantly with\nvariable width keys.\n\n\n> On the other hand, I’m concerned that radix tree would be an\n> over-engineering in terms of vacuum's dead tuples storage since the\n> dead tuple storage is static data and requires only lookup operation,\n> so if we want to use radix tree as dead tuple storage, I'd like to see\n> further use cases.\n\nI don't think we should rely on the read-only-ness. It seems pretty\nclear that we'd want parallel dead-tuple scans at a point not too far\ninto the future?\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Wed, 28 Jul 2021 11:52:59 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 29, 2021 at 3:53 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2021-07-27 13:06:56 +0900, Masahiko Sawada wrote:\n> > Apart from performance and memory usage points of view, we also need\n> > to consider the reusability of the code. When I started this thread, I\n> > thought the best data structure would be the one optimized for\n> > vacuum's dead tuple storage. However, if we can use a data structure\n> > that can also be used in general, we can use it also for other\n> > purposes. Moreover, if it's too optimized for the current TID system\n> > (32 bits block number, 16 bits offset number, maximum block/offset\n> > number, etc.) 
it may become a blocker for future changes.\n>\n> Indeed.\n>\n>\n> > In that sense, radix tree also seems good since it can also be used in\n> > gist vacuum as a replacement for intset, or a replacement for hash\n> > table for shared buffer as discussed before. Are there any other use\n> > cases?\n>\n> Yes, I think there are. Whenever there is some spatial locality it has a\n> decent chance of winning over a hash table, and it will most of the time\n> win over ordered datastructures like rbtrees (which perform very poorly\n> due to the number of branches and pointer dispatches). There's plenty\n> hashtables, e.g. for caches, locks, etc, in PG that have a medium-high\n> degree of locality, so I'd expect a few potential uses. When adding\n> \"tree compression\" (i.e. skip inner nodes that have a single incoming &\n> outgoing node) radix trees even can deal quite performantly with\n> variable width keys.\n\nGood point.\n\n>\n> > On the other hand, I’m concerned that radix tree would be an\n> > over-engineering in terms of vacuum's dead tuples storage since the\n> > dead tuple storage is static data and requires only lookup operation,\n> > so if we want to use radix tree as dead tuple storage, I'd like to see\n> > further use cases.\n>\n> I don't think we should rely on the read-only-ness. It seems pretty\n> clear that we'd want parallel dead-tuple scans at a point not too far\n> into the future?\n\nIndeed. Given that the radix tree itself has other use cases, I have\nno concern about using radix tree for vacuum's dead tuples storage. It\nwill be better to have one that can be generally used and has some\noptimizations that are helpful also for vacuum's use case, rather than\nhaving one that is very optimized only for vacuum's use case.\n\nDuring the performance benchmark, I found some bugs in the radix tree\nimplementation. 
Also, we need the functionality of tree iteration, and\nif we have the radix tree in the source tree as a general library, we\nneed some changes since the current implementation seems to be for a\nreplacement for shared buffer’s hash table. I'll try to work on those\nstuff as PoC if you don't. What do you think?\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Thu, 29 Jul 2021 18:11:13 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Masahiko Sawada писал 2021-07-29 12:11:\n> On Thu, Jul 29, 2021 at 3:53 AM Andres Freund <andres@anarazel.de> \n> wrote:\n>> \n>> Hi,\n>> \n>> On 2021-07-27 13:06:56 +0900, Masahiko Sawada wrote:\n>> > Apart from performance and memory usage points of view, we also need\n>> > to consider the reusability of the code. When I started this thread, I\n>> > thought the best data structure would be the one optimized for\n>> > vacuum's dead tuple storage. However, if we can use a data structure\n>> > that can also be used in general, we can use it also for other\n>> > purposes. Moreover, if it's too optimized for the current TID system\n>> > (32 bits block number, 16 bits offset number, maximum block/offset\n>> > number, etc.) it may become a blocker for future changes.\n>> \n>> Indeed.\n>> \n>> \n>> > In that sense, radix tree also seems good since it can also be used in\n>> > gist vacuum as a replacement for intset, or a replacement for hash\n>> > table for shared buffer as discussed before. Are there any other use\n>> > cases?\n>> \n>> Yes, I think there are. Whenever there is some spatial locality it has \n>> a\n>> decent chance of winning over a hash table, and it will most of the \n>> time\n>> win over ordered datastructures like rbtrees (which perform very \n>> poorly\n>> due to the number of branches and pointer dispatches). There's plenty\n>> hashtables, e.g. 
for caches, locks, etc, in PG that have a medium-high\n>> degree of locality, so I'd expect a few potential uses. When adding\n>> \"tree compression\" (i.e. skip inner nodes that have a single incoming \n>> &\n>> outgoing node) radix trees even can deal quite performantly with\n>> variable width keys.\n> \n> Good point.\n> \n>> \n>> > On the other hand, I’m concerned that radix tree would be an\n>> > over-engineering in terms of vacuum's dead tuples storage since the\n>> > dead tuple storage is static data and requires only lookup operation,\n>> > so if we want to use radix tree as dead tuple storage, I'd like to see\n>> > further use cases.\n>> \n>> I don't think we should rely on the read-only-ness. It seems pretty\n>> clear that we'd want parallel dead-tuple scans at a point not too far\n>> into the future?\n> \n> Indeed. Given that the radix tree itself has other use cases, I have\n> no concern about using radix tree for vacuum's dead tuples storage. It\n> will be better to have one that can be generally used and has some\n> optimizations that are helpful also for vacuum's use case, rather than\n> having one that is very optimized only for vacuum's use case.\n\nMain portion of svtm that leads to memory saving is compression of many\npages at once (CHUNK). It could be combined with radix as a storage for\npointers to CHUNKs.\n\nFor a moment I'm benchmarking IntegerSet replacement based on Trie (HATM \nlike)\nand CHUNK compression, therefore datastructure could be used for gist\nvacuum as well.\n\nSince it is generic (allows to index all 64bit) it lacks of trick used\nto speedup svtm. 
Still on 10x test it is faster than radix.\n\nI'll send result later today after all benchmarks complete.\n\nAnd I'll try then to make mix of radix and CHUNK compression.\n\n> During the performance benchmark, I found some bugs in the radix tree\n> implementation.\n\nThere is a bug in radix_to_key_off as well:\n\n tid_i |= ItemPointerGetBlockNumber(tid) << shift;\n\nItemPointerGetBlockNumber returns uint32, therefore result after shift\nis uint32 as well.\n\nIt leads to lesser memory consumption (and therefore better times) on\n10x test, when page number exceed 2^23 (8M). It still produce \"correct\"\nresult for test since every page is filled in the same way.\n\nCould you push your fixes for radix, please?\n\nregards,\nYura Sokolov\n\ny.sokolov@postgrespro.ru\nfunny.falcon@gmail.com\n\n\n", "msg_date": "Thu, 29 Jul 2021 14:03:18 +0300", "msg_from": "Yura Sokolov <y.sokolov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 29, 2021 at 8:03 PM Yura Sokolov <y.sokolov@postgrespro.ru> wrote:\n>\n> Masahiko Sawada писал 2021-07-29 12:11:\n> > On Thu, Jul 29, 2021 at 3:53 AM Andres Freund <andres@anarazel.de>\n> > wrote:\n> >>\n> >> Hi,\n> >>\n> >> On 2021-07-27 13:06:56 +0900, Masahiko Sawada wrote:\n> >> > Apart from performance and memory usage points of view, we also need\n> >> > to consider the reusability of the code. When I started this thread, I\n> >> > thought the best data structure would be the one optimized for\n> >> > vacuum's dead tuple storage. However, if we can use a data structure\n> >> > that can also be used in general, we can use it also for other\n> >> > purposes. Moreover, if it's too optimized for the current TID system\n> >> > (32 bits block number, 16 bits offset number, maximum block/offset\n> >> > number, etc.) 
it may become a blocker for future changes.\n> >>\n> >> Indeed.\n> >>\n> >>\n> >> > In that sense, radix tree also seems good since it can also be used in\n> >> > gist vacuum as a replacement for intset, or a replacement for hash\n> >> > table for shared buffer as discussed before. Are there any other use\n> >> > cases?\n> >>\n> >> Yes, I think there are. Whenever there is some spatial locality it has\n> >> a\n> >> decent chance of winning over a hash table, and it will most of the\n> >> time\n> >> win over ordered datastructures like rbtrees (which perform very\n> >> poorly\n> >> due to the number of branches and pointer dispatches). There's plenty\n> >> hashtables, e.g. for caches, locks, etc, in PG that have a medium-high\n> >> degree of locality, so I'd expect a few potential uses. When adding\n> >> \"tree compression\" (i.e. skip inner nodes that have a single incoming\n> >> &\n> >> outgoing node) radix trees even can deal quite performantly with\n> >> variable width keys.\n> >\n> > Good point.\n> >\n> >>\n> >> > On the other hand, I’m concerned that radix tree would be an\n> >> > over-engineering in terms of vacuum's dead tuples storage since the\n> >> > dead tuple storage is static data and requires only lookup operation,\n> >> > so if we want to use radix tree as dead tuple storage, I'd like to see\n> >> > further use cases.\n> >>\n> >> I don't think we should rely on the read-only-ness. It seems pretty\n> >> clear that we'd want parallel dead-tuple scans at a point not too far\n> >> into the future?\n> >\n> > Indeed. Given that the radix tree itself has other use cases, I have\n> > no concern about using radix tree for vacuum's dead tuples storage. 
It\n> > will be better to have one that can be generally used and has some\n> > optimizations that are helpful also for vacuum's use case, rather than\n> > having one that is very optimized only for vacuum's use case.\n>\n> Main portion of svtm that leads to memory saving is compression of many\n> pages at once (CHUNK). It could be combined with radix as a storage for\n> pointers to CHUNKs.\n>\n> For a moment I'm benchmarking IntegerSet replacement based on Trie (HATM\n> like)\n> and CHUNK compression, therefore datastructure could be used for gist\n> vacuum as well.\n>\n> Since it is generic (allows to index all 64bit) it lacks of trick used\n> to speedup svtm. Still on 10x test it is faster than radix.\n\nBTW, how does svtm work when we add two sets of dead tuple TIDs to one\nsvtm? Dead tuple TIDs are unique sets but those sets could have TIDs\nof the different offsets on the same block. The case I imagine is the\nidea discussed on this thread[1]. With this idea, we store the\ncollected dead tuple TIDs somewhere and skip index vacuuming for some\nreason (index skipping optimization, failsafe mode, or interruptions\netc.). Then, in the next lazy vacuum timing, we load the dead tuple\nTIDs and start to scan the heap. During the heap scan in the second\nlazy vacuum, it's possible that new dead tuples will be found on the\npages that we have already stored in svtm during the first lazy\nvacuum. 
How can we efficiently update the chunk in the svtm?\n\nRegards,\n\n[1] https://www.postgresql.org/message-id/CA%2BTgmoZgapzekbTqdBrcH8O8Yifi10_nB7uWLB8ajAhGL21M6A%40mail.gmail.com\n\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Thu, 29 Jul 2021 23:29:13 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Masahiko Sawada писал 2021-07-29 17:29:\n> On Thu, Jul 29, 2021 at 8:03 PM Yura Sokolov <y.sokolov@postgrespro.ru> \n> wrote:\n>> \n>> Masahiko Sawada писал 2021-07-29 12:11:\n>> > On Thu, Jul 29, 2021 at 3:53 AM Andres Freund <andres@anarazel.de>\n>> > wrote:\n>> >>\n>> >> Hi,\n>> >>\n>> >> On 2021-07-27 13:06:56 +0900, Masahiko Sawada wrote:\n>> >> > Apart from performance and memory usage points of view, we also need\n>> >> > to consider the reusability of the code. When I started this thread, I\n>> >> > thought the best data structure would be the one optimized for\n>> >> > vacuum's dead tuple storage. However, if we can use a data structure\n>> >> > that can also be used in general, we can use it also for other\n>> >> > purposes. Moreover, if it's too optimized for the current TID system\n>> >> > (32 bits block number, 16 bits offset number, maximum block/offset\n>> >> > number, etc.) it may become a blocker for future changes.\n>> >>\n>> >> Indeed.\n>> >>\n>> >>\n>> >> > In that sense, radix tree also seems good since it can also be used in\n>> >> > gist vacuum as a replacement for intset, or a replacement for hash\n>> >> > table for shared buffer as discussed before. Are there any other use\n>> >> > cases?\n>> >>\n>> >> Yes, I think there are. 
Whenever there is some spatial locality it has\n>> >> a\n>> >> decent chance of winning over a hash table, and it will most of the\n>> >> time\n>> >> win over ordered datastructures like rbtrees (which perform very\n>> >> poorly\n>> >> due to the number of branches and pointer dispatches). There's plenty\n>> >> hashtables, e.g. for caches, locks, etc, in PG that have a medium-high\n>> >> degree of locality, so I'd expect a few potential uses. When adding\n>> >> \"tree compression\" (i.e. skip inner nodes that have a single incoming\n>> >> &\n>> >> outgoing node) radix trees even can deal quite performantly with\n>> >> variable width keys.\n>> >\n>> > Good point.\n>> >\n>> >>\n>> >> > On the other hand, I’m concerned that radix tree would be an\n>> >> > over-engineering in terms of vacuum's dead tuples storage since the\n>> >> > dead tuple storage is static data and requires only lookup operation,\n>> >> > so if we want to use radix tree as dead tuple storage, I'd like to see\n>> >> > further use cases.\n>> >>\n>> >> I don't think we should rely on the read-only-ness. It seems pretty\n>> >> clear that we'd want parallel dead-tuple scans at a point not too far\n>> >> into the future?\n>> >\n>> > Indeed. Given that the radix tree itself has other use cases, I have\n>> > no concern about using radix tree for vacuum's dead tuples storage. It\n>> > will be better to have one that can be generally used and has some\n>> > optimizations that are helpful also for vacuum's use case, rather than\n>> > having one that is very optimized only for vacuum's use case.\n>> \n>> Main portion of svtm that leads to memory saving is compression of \n>> many\n>> pages at once (CHUNK). 
It could be combined with radix as a storage \n>> for\n>> pointers to CHUNKs., bute\n>> \n>> For a moment I'm benchmarking IntegerSet replacement based on Trie \n>> (HATM\n>> like)\n>> and CHUNK compression, therefore datastructure could be used for gist\n>> vacuum as well.\n>> \n>> Since it is generic (allows to index all 64bit) it lacks of trick used\n>> to speedup svtm. Still on 10x test it is faster than radix.\n\nI've attached IntegerSet2 patch for pgtools repo and benchmark results.\nBranch https://github.com/funny-falcon/pgtools/tree/integerset2\n\nSVTM is measured with couple of changes from commit \n5055ef72d23482dd3e11ce\nin that branch: 1) more often compress bitmap, but slower, 2) couple of\npopcount tricks.\n\nIntegerSet consists of trie index to CHUNKS. CHUNKS is compressed bitmap\nof 2^15 (6+9) bits (almost like in SVTM, but for fixed bit width).\n\nWell, IntegerSet2 is always faster than IntegerSet and always uses\nsignificantly less memory (radix uses more memory than IntegerSet in\ncouple of tests and uses comparable in others).\n\nIntegerSet2 is not always faster than radix. It is more like radix.\nThat it because both are generic prefix trees with comparable amount of\nmemory accesses. SVTM did the trick being not multilevel prefix tree, \nbut\njust 1 level bitmap index to chunks.\n\nI believe, trie part of IntegerSet could be replaced with radix.\nIe use radix as storage for pointers to CHUNKS.\n\n> BTW, how does svtm work when we add two sets of dead tuple TIDs to one\n> svtm? Dead tuple TIDs are unique sets but those sets could have TIDs\n> of the different offsets on the same block. The case I imagine is the\n> idea discussed on this thread[1]. With this idea, we store the\n> collected dead tuple TIDs somewhere and skip index vacuuming for some\n> reason (index skipping optimization, failsafe mode, or interruptions\n> etc.). Then, in the next lazy vacuum timing, we load the dead tuple\n> TIDs and start to scan the heap. 
During the heap scan in the second\n> lazy vacuum, it's possible that new dead tuples will be found on the\n> pages that we have already stored in svtm during the first lazy\n> vacuum. How can we efficiently update the chunk in the svtm?\n\nIf we store tidmap to disk, then it will be serialized. Since SVTM/\nIntegerSet2 are ordered, they could be loaded in order. Then we\ncan just merge tuples in per page basis: deserialize page (or CHUNK),\nput new tuples, store again. Since both scan (scan of serilized map\nand scan of table) are in order, merging will be cheap enough.\n\nSVTM and IntegerSet2 already works in \"buffered\" way on insertion.\n(As well as IntegerSet that also does compression but in small parts).\n\nregards,\n\nYura Sokolov\ny.sokolov@postgrespro.ru\nfunny.falcon@gmail.com", "msg_date": "Thu, 29 Jul 2021 18:29:51 +0300", "msg_from": "Yura Sokolov <y.sokolov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Yura Sokolov писал 2021-07-29 18:29:\n\n> I've attached IntegerSet2 patch for pgtools repo and benchmark results.\n> Branch https://github.com/funny-falcon/pgtools/tree/integerset2\n\nStrange web-mail client... I never can be sure what it will attach...\n\nReattach benchmark results\n\n> \n> regards,\n> \n> Yura Sokolov\n> y.sokolov@postgrespro.ru\n> funny.falcon@gmail.com", "msg_date": "Thu, 29 Jul 2021 19:49:22 +0300", "msg_from": "Yura Sokolov <y.sokolov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 29, 2021 at 5:11 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> Indeed. Given that the radix tree itself has other use cases, I have\n> no concern about using radix tree for vacuum's dead tuples storage. 
It\n> will be better to have one that can be generally used and has some\n> optimizations that are helpful also for vacuum's use case, rather than\n> having one that is very optimized only for vacuum's use case.\n\nWhat I'm about to say might be a really stupid idea, especially since\nI haven't looked at any of the code already posted, but what I'm\nwondering about is whether we need a full radix tree or maybe just a\nradix-like lookup aid. For example, suppose that for a relation <= 8MB\nin size, we create an array of 1024 elements indexed by block number.\nEach element of the array stores an offset into the dead TID array.\nWhen you need to probe for a TID, you look up blkno and blkno + 1 in\nthe array and then bsearch only between those two offsets. For bigger\nrelations, a two or three level structure could be built, or it could\nalways be 3 levels. This could even be done on demand, so you\ninitialize all of the elements to some special value that means \"not\ncomputed yet\" and then fill them the first time they're needed,\nperhaps with another special value that means \"no TIDs in that block\".\n\nI don't know if this is better, but I do kind of like the fact that\nthe basic representation is just an array. It makes it really easy to\npredict how much memory will be needed for a given number of dead\nTIDs, and it's very DSM-friendly as well.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Thu, 29 Jul 2021 13:15:53 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Robert Haas писал 2021-07-29 20:15:\n> On Thu, Jul 29, 2021 at 5:11 AM Masahiko Sawada <sawada.mshk@gmail.com> \n> wrote:\n>> Indeed. Given that the radix tree itself has other use cases, I have\n>> no concern about using radix tree for vacuum's dead tuples storage. 
It\n>> will be better to have one that can be generally used and has some\n>> optimizations that are helpful also for vacuum's use case, rather than\n>> having one that is very optimized only for vacuum's use case.\n> \n> What I'm about to say might be a really stupid idea, especially since\n> I haven't looked at any of the code already posted, but what I'm\n> wondering about is whether we need a full radix tree or maybe just a\n> radix-like lookup aid. For example, suppose that for a relation <= 8MB\n> in size, we create an array of 1024 elements indexed by block number.\n> Each element of the array stores an offset into the dead TID array.\n> When you need to probe for a TID, you look up blkno and blkno + 1 in\n> the array and then bsearch only between those two offsets. For bigger\n> relations, a two or three level structure could be built, or it could\n> always be 3 levels. This could even be done on demand, so you\n> initialize all of the elements to some special value that means \"not\n> computed yet\" and then fill them the first time they're needed,\n> perhaps with another special value that means \"no TIDs in that block\".\n\n8MB relation is not a problem, imo. There is no need to do anything to\nhandle 8MB relation.\n\nProblem is 2TB relation. It has 256M pages and, lets suppose, 3G dead\ntuples.\n\nThen offset array will be 2GB and tuple offset array will be 6GB (2 byte\noffset per tuple). 8GB in total.\n\nWe can make offset array only for higher 3 bytes of block number.\nWe then will have 1M offset array weighted 8MB, and there will be array\nof 3byte tuple pointers (1 remaining byte from block number, and 2 bytes\nfrom Tuple) weighted 9GB.\n\nBut using per-batch compression schemes, there could be amortized\n4 byte per page and 1 byte per tuple: 1GB + 3GB = 4GB memory.\nYes, it is not as guaranteed as in array approach. But 95% of time it is\nsuch low and even lower. And better: more tuples are dead - better\ncompression works. 
Page with all tuples dead could be encoded as little\nas 5 bytes. Therefore, overall memory consumption is more stable and\npredictive.\n\nLower memory consumption of tuple storage means there is less chance\nindexes should be scanned twice or more times. It gives more\npredictability in user experience.\n\n> I don't know if this is better, but I do kind of like the fact that\n> the basic representation is just an array. It makes it really easy to\n> predict how much memory will be needed for a given number of dead\n> TIDs, and it's very DSM-friendly as well.\n\nWhole thing could be encoded in one single array of bytes. Just give\n\"pointer-to-array\"+\"array-size\" to constructor, and use \"bump allocator\"\ninside. Complex logical structure doesn't imply \"DSM-unfriendliness\".\nHmm.... I mean if it is suitably designed.\n\nIn fact, my code uses bump allocator internally to avoid \"per-allocation\noverhead\" of \"aset\", \"slab\" or \"generational\". And IntegerSet2 version\neven uses it for all allocations since it has no reallocatable parts.\n\nWell, if datastructure has reallocatable parts, it could be less \nfriendly\nto DSM.\n\nregards,\n\n---\nYura Sokolov\ny.sokolov@postgrespro.ru\nfunny.falcon@gmail.com\n\n\n", "msg_date": "Thu, 29 Jul 2021 21:55:30 +0300", "msg_from": "Yura Sokolov <y.sokolov@postgrespro.ru>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2021-07-29 13:15:53 -0400, Robert Haas wrote:\n> I don't know if this is better, but I do kind of like the fact that\n> the basic representation is just an array. It makes it really easy to\n> predict how much memory will be needed for a given number of dead\n> TIDs, and it's very DSM-friendly as well.\n\nI think those advantages are far outstripped by the big disadvantage of\nneeding to either size the array accurately from the start, or to\nreallocate the whole array. 
Our current pre-allocation behaviour is\nvery wasteful for most vacuums but doesn't handle large work_mem at all,\ncausing unnecessary index scans.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Thu, 29 Jul 2021 12:14:24 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 29, 2021 at 3:14 PM Andres Freund <andres@anarazel.de> wrote:\n> I think those advantages are far outstripped by the big disadvantage of\n> needing to either size the array accurately from the start, or to\n> reallocate the whole array. Our current pre-allocation behaviour is\n> very wasteful for most vacuums but doesn't handle large work_mem at all,\n> causing unnecessary index scans.\n\nI agree that the current pre-allocation behavior is bad, but I don't\nreally see that as an issue with my idea. Fixing that would require\nallocating the array in chunks, but that doesn't really affect the\ncore of the idea much, at least as I see it.\n\nBut I accept that Yura has a very good point about the memory usage of\nwhat I was proposing.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 30 Jul 2021 15:13:49 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2021-07-30 15:13:49 -0400, Robert Haas wrote:\n> On Thu, Jul 29, 2021 at 3:14 PM Andres Freund <andres@anarazel.de> wrote:\n> > I think those advantages are far outstripped by the big disadvantage of\n> > needing to either size the array accurately from the start, or to\n> > reallocate the whole array. 
Our current pre-allocation behaviour is\n> > very wasteful for most vacuums but doesn't handle large work_mem at all,\n> > causing unnecessary index scans.\n> \n> I agree that the current pre-allocation behavior is bad, but I don't\n> really see that as an issue with my idea. Fixing that would require\n> allocating the array in chunks, but that doesn't really affect the\n> core of the idea much, at least as I see it.\n\nWell, then it'd not really be the \"simple array approach\" anymore :)\n\n\n> But I accept that Yura has a very good point about the memory usage of\n> what I was proposing.\n\nThe lower memory usage also often will result in a better cache\nutilization - which is a crucial factor for index vacuuming when the\nindex order isn't correlated with the heap order. Cache misses really\nare a crucial performance factor there.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Fri, 30 Jul 2021 12:34:12 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jul 30, 2021 at 3:34 PM Andres Freund <andres@anarazel.de> wrote:\n> The lower memory usage also often will result in a better cache\n> utilization - which is a crucial factor for index vacuuming when the\n> index order isn't correlated with the heap order. 
Cache misses really\n> are a crucial performance factor there.\n\nFair enough.\n\n-- \nRobert Haas\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 30 Jul 2021 15:48:26 -0400", "msg_from": "Robert Haas <robertmhaas@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nToday I noticed the inefficiencies of our dead tuple storage once\nagain, and started theorizing about a better storage method; which is\nwhen I remembered that this thread exists, and that this thread\nalready has amazing results.\n\nAre there any plans to get the results of this thread from PoC to committable?\n\nKind regards,\n\nMatthias van de Meent\n\n\n", "msg_date": "Fri, 11 Feb 2022 13:47:01 +0100", "msg_from": "Matthias van de Meent <boekewurm+postgres@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2022-02-11 13:47:01 +0100, Matthias van de Meent wrote:\n> Today I noticed the inefficiencies of our dead tuple storage once\n> again, and started theorizing about a better storage method; which is\n> when I remembered that this thread exists, and that this thread\n> already has amazing results.\n> \n> Are there any plans to get the results of this thread from PoC to committable?\n\nI'm not currently planning to work on it personally. 
It'd would be awesome if\nsomebody did...\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Sat, 12 Feb 2022 18:02:55 -0800", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sun, Feb 13, 2022 at 11:02 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2022-02-11 13:47:01 +0100, Matthias van de Meent wrote:\n> > Today I noticed the inefficiencies of our dead tuple storage once\n> > again, and started theorizing about a better storage method; which is\n> > when I remembered that this thread exists, and that this thread\n> > already has amazing results.\n> >\n> > Are there any plans to get the results of this thread from PoC to committable?\n>\n> I'm not currently planning to work on it personally. It'd would be awesome if\n> somebody did...\n\nActually, I'm working on simplifying and improving radix tree\nimplementation for PG16 dev cycle. From the discussion so far I think\nit's better to have a data structure that can be used for\ngeneral-purpose and is also good for storing TID, not very specific to\nstore TID. So I think radix tree would be a potent candidate. I have\ndone the insertion and search implementation.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Sun, 13 Feb 2022 12:36:13 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On 2022-02-13 12:36:13 +0900, Masahiko Sawada wrote:\n> Actually, I'm working on simplifying and improving radix tree\n> implementation for PG16 dev cycle. From the discussion so far I think\n> it's better to have a data structure that can be used for\n> general-purpose and is also good for storing TID, not very specific to\n> store TID. So I think radix tree would be a potent candidate. 
I have\n> done the insertion and search implementation.\n\nAwesome!\n\n\n", "msg_date": "Sat, 12 Feb 2022 19:39:54 -0800", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn Sun, Feb 13, 2022 at 12:39 PM Andres Freund <andres@anarazel.de> wrote:\n>\n> On 2022-02-13 12:36:13 +0900, Masahiko Sawada wrote:\n> > Actually, I'm working on simplifying and improving radix tree\n> > implementation for PG16 dev cycle. From the discussion so far I think\n> > it's better to have a data structure that can be used for\n> > general-purpose and is also good for storing TID, not very specific to\n> > store TID. So I think radix tree would be a potent candidate. I have\n> > done the insertion and search implementation.\n>\n> Awesome!\n\nTo move this project forward, I've implemented radix tree\nimplementation from scratch while studying Andres's implementation. It\nsupports insertion, search, and iteration but not deletion yet. In my\nimplementation, I use Datum as the value so internal and lead nodes\nhave the same data structure, simplifying the implementation. The\niteration on the radix tree returns keys with the value in ascending\norder of the key. The patch has regression tests for radix tree but is\nstill in PoC state: left many debugging codes, not supported SSE2 SIMD\ninstructions, added -mavx2 flag is hard-coded.\n\nI've measured the size and loading and lookup performance for each\ncandidate data structure with two test cases: dense and sparse, by\nusing the test tool[1]. 
Here are the results:\n\n* Case1 - Dense (simulating the case where there are 1000 consecutive\npages each of which has 100 dead tuples, at 100 page intervals.)\nselect prepare(\n1000000, -- max block\n100, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n1000, -- # of consecutive pages having dead tuples\n1100 -- page interval\n);\n\nname size attach lookup\narray 520 MB 248.60 ms 89891.92 ms\nhash 3188 MB 28029.59 ms 50850.32 ms\nintset 85 MB 644.96 ms 39801.17 ms\ntbm 96 MB 474.06 ms 6641.38 ms\nradix 37 MB 173.03 ms 9145.97 ms\nradix_tree 36 MB 184.51 ms 9729.94 ms\n\n* Case2 - Sparse (simulating a case where there are pages that have 2\ndead tuples every 1000 pages.)\nselect prepare(\n10000000, -- max block\n2, -- # of dead tuples per page\n50, -- dead tuples interval within a page\n1, -- # of consecutive pages having dead tuples\n1000 -- page interval\n);\n\nname size attach lookup\narray 125 kB 0.53 ms 82183.61 ms\nhash 1032 kB 1.31 ms 28128.33 ms\nintset 222 kB 0.51 ms 87775.68 ms\ntbm 768 MB 1.24 ms 98674.60 ms\nradix 1080 kB 1.66 ms 20698.07 ms\nradix_tree 949 kB 1.50 ms 21465.23 ms\n\nEach test virtually generates TIDs and loads them to the data\nstructure, and then searches for virtual index TIDs.\n'array' is a sorted array which is the current method, 'hash' is HTAB,\n'intset' is IntegerSet, and 'tbm' is TIDBitmap. The last two results\nare radix tree implementations: 'radix' is Andres's radix tree\nimplementation and 'radix_tree' is my radix tree implementation. In\nboth radix tree tests, I converted TIDs into an int64 and store the\nlower 6 bits in the value part of the radix tree.\n\nOverall, radix tree implementations have good numbers. 
Once we got an\nagreement on moving in this direction, I'll start a new thread for\nthat and move the implementation further; there are many things to do\nand discuss: deletion, API design, SIMD support, more tests etc.\n\nRegards,\n\n[1] https://github.com/MasahikoSawada/pgtools/tree/master/bdbench\n[2] https://www.postgresql.org/message-id/CAFiTN-visUO9VTz2%2Bh224z5QeUjKhKNdSfjaCucPhYJdbzxx0g%40mail.gmail.com\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/", "msg_date": "Tue, 10 May 2022 10:51:46 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, May 10, 2022 at 8:52 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> Overall, radix tree implementations have good numbers. Once we got an\n> agreement on moving in this direction, I'll start a new thread for\n> that and move the implementation further; there are many things to do\n> and discuss: deletion, API design, SIMD support, more tests etc.\n\n+1\n\n(FWIW, I think the current thread is still fine.)\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 10 May 2022 16:58:31 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, May 10, 2022 at 6:58 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Tue, May 10, 2022 at 8:52 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > Overall, radix tree implementations have good numbers. Once we got an\n> > agreement on moving in this direction, I'll start a new thread for\n> > that and move the implementation further; there are many things to do\n> > and discuss: deletion, API design, SIMD support, more tests etc.\n>\n> +1\n>\n\nThanks!\n\nI've attached an updated version patch. 
It is still WIP but I've\nimplemented deletion and improved test cases and comments.\n\n> (FWIW, I think the current thread is still fine.)\n\nOkay, agreed.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/", "msg_date": "Wed, 25 May 2022 11:48:16 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, May 25, 2022 at 11:48 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Tue, May 10, 2022 at 6:58 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Tue, May 10, 2022 at 8:52 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > Overall, radix tree implementations have good numbers. Once we got an\n> > > agreement on moving in this direction, I'll start a new thread for\n> > > that and move the implementation further; there are many things to do\n> > > and discuss: deletion, API design, SIMD support, more tests etc.\n> >\n> > +1\n> >\n>\n> Thanks!\n>\n> I've attached an updated version patch. It is still WIP but I've\n> implemented deletion and improved test cases and comments.\n\nI've attached an updated version patch that changes the configure\nscript. I'm still studying how to support AVX2 on msvc build. Also,\nadded more regression tests.\n\nThe integration with lazy vacuum and parallel vacuum is missing for\nnow. 
In order to support parallel vacuum, we need to have the radix\ntree support to be created on DSA.\n\nAdded this item to the next CF.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/", "msg_date": "Thu, 16 Jun 2022 13:56:55 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jun 16, 2022 at 11:57 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> I've attached an updated version patch that changes the configure\n> script. I'm still studying how to support AVX2 on msvc build. Also,\n> added more regression tests.\n\nThanks for the update, I will take a closer look at the patch in the\nnear future, possibly next week. For now, though, I'd like to question\nwhy we even need to use 32-byte registers in the first place. For one,\nthe paper referenced has 16-pointer nodes, but none for 32 (next level\nis 48 and uses a different method to find the index of the next\npointer). Andres' prototype has 32-pointer nodes, but in a quick read\nof his patch a couple weeks ago I don't recall a reason mentioned for\nit. Even if 32-pointer nodes are better from a memory perspective, I\nimagine it should be possible to use two SSE2 registers to find the\nindex. It'd be locally slightly more complex, but not much. It might\nnot even cost much more in cycles since AVX2 would require indirecting\nthrough a function pointer. It's much more convenient if we don't need\na runtime check. There are also thermal and power disadvantages when\nusing AXV2 in some workloads. I'm not sure that's the case here, but\nif it is, we'd better be getting something in return.\n\nOne more thing in general: In an earlier version, I noticed that\nAndres used the slab allocator and documented why. 
The last version of\nyour patch that I saw had the same allocator, but not the \"why\".\nEspecially in early stages of review, we want to document design\ndecisions so it's more clear for the reader.\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Thu, 16 Jun 2022 14:30:06 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "\nOn 2022-06-16 Th 00:56, Masahiko Sawada wrote:\n>\n> I've attached an updated version patch that changes the configure\n> script. I'm still studying how to support AVX2 on msvc build. Also,\n> added more regression tests.\n\n\nI think you would need to add '/arch:AVX2' to the compiler flags in\nMSBuildProject.pm.\n\n\nSee\n<https://docs.microsoft.com/en-us/cpp/build/reference/arch-x64?view=msvc-170>\n\n\ncheers\n\n\nandrew\n\n\n--\nAndrew Dunstan\nEDB: https://www.enterprisedb.com\n\n\n\n", "msg_date": "Thu, 16 Jun 2022 14:34:20 -0400", "msg_from": "Andrew Dunstan <andrew@dunslane.net>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn Thu, Jun 16, 2022 at 4:30 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Jun 16, 2022 at 11:57 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > I've attached an updated version patch that changes the configure\n> > script. I'm still studying how to support AVX2 on msvc build. Also,\n> > added more regression tests.\n>\n> Thanks for the update, I will take a closer look at the patch in the\n> near future, possibly next week.\n\nThanks!\n\n> For now, though, I'd like to question\n> why we even need to use 32-byte registers in the first place. For one,\n> the paper referenced has 16-pointer nodes, but none for 32 (next level\n> is 48 and uses a different method to find the index of the next\n> pointer). 
Andres' prototype has 32-pointer nodes, but in a quick read\n> of his patch a couple weeks ago I don't recall a reason mentioned for\n> it.\n\nI might be wrong but since AVX2 instruction set is introduced in\nHaswell microarchitecture in 2013 and the referenced paper is\npublished in the same year, the art didn't use AVX2 instruction set.\n32-pointer nodes are better from a memory perspective as you\nmentioned. Andres' prototype supports both 16-pointer nodes and\n32-pointer nodes (out of 6 node types). This would provide better\nmemory usage but on the other hand, it would also bring overhead of\nswitching the node type. Anyway, it's an important design decision to\nsupport which size of node to support. It should be done based on\nexperiment results and documented.\n\n> Even if 32-pointer nodes are better from a memory perspective, I\n> imagine it should be possible to use two SSE2 registers to find the\n> index. It'd be locally slightly more complex, but not much. It might\n> not even cost much more in cycles since AVX2 would require indirecting\n> through a function pointer. It's much more convenient if we don't need\n> a runtime check.\n\nRight.\n\n> There are also thermal and power disadvantages when\n> using AXV2 in some workloads. I'm not sure that's the case here, but\n> if it is, we'd better be getting something in return.\n\nGood point.\n\n> One more thing in general: In an earlier version, I noticed that\n> Andres used the slab allocator and documented why. The last version of\n> your patch that I saw had the same allocator, but not the \"why\".\n> Especially in early stages of review, we want to document design\n> decisions so it's more clear for the reader.\n\nIndeed. 
I'll add comments in the next version patch.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Mon, 20 Jun 2022 09:56:26 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jun 20, 2022 at 7:57 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n[v3 patch]\n\nHi Masahiko,\n\nSince there are new files, and they are pretty large, I've attached\nmost specific review comments and questions as a diff rather than in\nthe email body. This is not a full review, which will take more time\n-- this is a first pass mostly to aid my understanding, and discuss\nsome of the design and performance implications.\n\nI tend to think it's a good idea to avoid most cosmetic review until\nit's close to commit, but I did mention a couple things that might\nenhance readability during review.\n\nAs I mentioned to you off-list, I have some thoughts on the nodes using SIMD:\n\n> On Thu, Jun 16, 2022 at 4:30 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > For now, though, I'd like to question\n> > why we even need to use 32-byte registers in the first place. For one,\n> > the paper referenced has 16-pointer nodes, but none for 32 (next level\n> > is 48 and uses a different method to find the index of the next\n> > pointer). Andres' prototype has 32-pointer nodes, but in a quick read\n> > of his patch a couple weeks ago I don't recall a reason mentioned for\n> > it.\n>\n> I might be wrong but since AVX2 instruction set is introduced in\n> Haswell microarchitecture in 2013 and the referenced paper is\n> published in the same year, the art didn't use AVX2 instruction set.\n\nSure, but with a bit of work the same technique could be done on that\nnode size with two 16-byte registers.\n\n> 32-pointer nodes are better from a memory perspective as you\n> mentioned. 
Andres' prototype supports both 16-pointer nodes and\n> 32-pointer nodes (out of 6 node types). This would provide better\n> memory usage but on the other hand, it would also bring overhead of\n> switching the node type.\n\nRight, using more node types provides smaller increments of node size.\nJust changing node type can be better or worse, depending on the\ninput.\n\n> Anyway, it's an important design decision to\n> support which size of node to support. It should be done based on\n> experiment results and documented.\n\nAgreed. I would add that in the first step, we want something\nstraightforward to read and easy to integrate into our codebase. I\nsuspect other optimizations would be worth a lot more than using AVX2:\n- collapsing inner nodes\n- taking care when constructing the key (more on this when we\nintegrate with VACUUM)\n...and a couple Andres mentioned:\n- memory management: in\nhttps://www.postgresql.org/message-id/flat/20210717194333.mr5io3zup3kxahfm%40alap3.anarazel.de\n- node dispatch:\nhttps://www.postgresql.org/message-id/20210728184139.qhvx6nbwdcvo63m6%40alap3.anarazel.de\n\nTherefore, I would suggest that we use SSE2 only, because:\n- portability is very easy\n- to avoid a performance hit from indirecting through a function pointer\n\nWhen the PG16 cycle opens, I will work separately on ensuring the\nportability of using SSE2, so you can focus on other aspects. I think\nit would be a good idea to have both node16 and node32 for testing.\nDuring benchmarking we can delete one or the other and play with the\nother thresholds a bit.\n\nIdeally, node16 and node32 would have the same code with a different\nloop count (1 or 2). More generally, there is too much duplication of\ncode (noted by Andres in his PoC), and there are many variable names\nwith the node size embedded. 
This is a bit tricky to make more\ngeneral, so we don't need to try it yet, but ideally we would have\nsomething similar to:\n\nswitch (node->kind) // todo: inspect tagged pointer\n{\n case RADIX_TREE_NODE_KIND_4:\n idx = node_search_eq(node, chunk, 4);\n do_action(node, idx, 4, ...);\n break;\n case RADIX_TREE_NODE_KIND_32:\n idx = node_search_eq(node, chunk, 32);\n do_action(node, idx, 32, ...);\n ...\n}\n\nstatic pg_alwaysinline void\nnode_search_eq(radix_tree_node node, uint8 chunk, int16 node_fanout)\n{\nif (node_fanout <= SIMPLE_LOOP_THRESHOLD)\n // do simple loop with (node_simple *) node;\nelse if (node_fanout <= VECTORIZED_LOOP_THRESHOLD)\n // do vectorized loop where available with (node_vec *) node;\n...\n}\n\n...and let the compiler do loop unrolling and branch removal. Not sure\nhow difficult this is to do, but something to think about.\n\nAnother thought: for non-x86 platforms, the SIMD nodes degenerate to\n\"simple loop\", and looping over up to 32 elements is not great\n(although possibly okay). We could do binary search, but that has bad\nbranch prediction.\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 27 Jun 2022 18:12:13 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "> Another thought: for non-x86 platforms, the SIMD nodes degenerate to\n> \"simple loop\", and looping over up to 32 elements is not great\n> (although possibly okay). 
We could do binary search, but that has bad\n> branch prediction.\n\nI am not sure that for relevant non-x86 platforms SIMD / vector\ninstructions would not be used (though it would be a good idea to\nverify)\nDo you know any modern platforms that do not have SIMD ?\n\nI would definitely test before assuming binary search is better.\n\nOften other approaches like counting search over such small vectors is\nmuch better when the vector fits in cache (or even a cache line) and\nyou always visit all items as this will completely avoid branch\npredictions and allows compiler to vectorize and / or unroll the loop\nas needed.\n\nCheers\nHannu\n\n\n", "msg_date": "Mon, 27 Jun 2022 17:23:22 +0200", "msg_from": "Hannu Krosing <hannuk@google.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2022-06-27 18:12:13 +0700, John Naylor wrote:\n> Another thought: for non-x86 platforms, the SIMD nodes degenerate to\n> \"simple loop\", and looping over up to 32 elements is not great\n> (although possibly okay). We could do binary search, but that has bad\n> branch prediction.\n\nI'd be quite quite surprised if binary search were cheaper. Particularly on\nless fancy platforms.\n\n- Andres\n\n\n", "msg_date": "Mon, 27 Jun 2022 08:26:49 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jun 27, 2022 at 10:23 PM Hannu Krosing <hannuk@google.com> wrote:\n>\n> > Another thought: for non-x86 platforms, the SIMD nodes degenerate to\n> > \"simple loop\", and looping over up to 32 elements is not great\n> > (although possibly okay). 
We could do binary search, but that has bad\n> > branch prediction.\n>\n> I am not sure that for relevant non-x86 platforms SIMD / vector\n> instructions would not be used (though it would be a good idea to\n> verify)\n\nBy that logic, we can also dispense with intrinsics on x86 because the\ncompiler will autovectorize there too (if I understand your claim\ncorrectly). I'm not quite convinced of that in this case.\n\n> I would definitely test before assuming binary search is better.\n\nI wasn't very clear in my language, but I did reject binary search as\nhaving bad branch prediction.\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 28 Jun 2022 11:17:42 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2022-06-28 11:17:42 +0700, John Naylor wrote:\n> On Mon, Jun 27, 2022 at 10:23 PM Hannu Krosing <hannuk@google.com> wrote:\n> >\n> > > Another thought: for non-x86 platforms, the SIMD nodes degenerate to\n> > > \"simple loop\", and looping over up to 32 elements is not great\n> > > (although possibly okay). We could do binary search, but that has bad\n> > > branch prediction.\n> >\n> > I am not sure that for relevant non-x86 platforms SIMD / vector\n> > instructions would not be used (though it would be a good idea to\n> > verify)\n> \n> By that logic, we can also dispense with intrinsics on x86 because the\n> compiler will autovectorize there too (if I understand your claim\n> correctly). I'm not quite convinced of that in this case.\n\nLast time I checked (maybe a year ago?) 
none of the popular compilers could\nautovectorize that code pattern.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Mon, 27 Jun 2022 22:02:11 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn Mon, Jun 27, 2022 at 8:12 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Jun 20, 2022 at 7:57 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> [v3 patch]\n>\n> Hi Masahiko,\n>\n> Since there are new files, and they are pretty large, I've attached\n> most specific review comments and questions as a diff rather than in\n> the email body. This is not a full review, which will take more time\n> -- this is a first pass mostly to aid my understanding, and discuss\n> some of the design and performance implications.\n>\n> I tend to think it's a good idea to avoid most cosmetic review until\n> it's close to commit, but I did mention a couple things that might\n> enhance readability during review.\n\nThank you for reviewing the patch!\n\n>\n> As I mentioned to you off-list, I have some thoughts on the nodes using SIMD:\n>\n> > On Thu, Jun 16, 2022 at 4:30 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > For now, though, I'd like to question\n> > > why we even need to use 32-byte registers in the first place. For one,\n> > > the paper referenced has 16-pointer nodes, but none for 32 (next level\n> > > is 48 and uses a different method to find the index of the next\n> > > pointer). 
Andres' prototype has 32-pointer nodes, but in a quick read\n> > > of his patch a couple weeks ago I don't recall a reason mentioned for\n> > > it.\n> >\n> > I might be wrong but since AVX2 instruction set is introduced in\n> > Haswell microarchitecture in 2013 and the referenced paper is\n> > published in the same year, the art didn't use AVX2 instruction set.\n>\n> Sure, but with a bit of work the same technique could be done on that\n> node size with two 16-byte registers.\n>\n> > 32-pointer nodes are better from a memory perspective as you\n> > mentioned. Andres' prototype supports both 16-pointer nodes and\n> > 32-pointer nodes (out of 6 node types). This would provide better\n> > memory usage but on the other hand, it would also bring overhead of\n> > switching the node type.\n>\n> Right, using more node types provides smaller increments of node size.\n> Just changing node type can be better or worse, depending on the\n> input.\n>\n> > Anyway, it's an important design decision to\n> > support which size of node to support. It should be done based on\n> > experiment results and documented.\n>\n> Agreed. 
I would add that in the first step, we want something\n> straightforward to read and easy to integrate into our codebase.\n\nAgreed.\n\n\n\n> I\n> suspect other optimizations would be worth a lot more than using AVX2:\n> - collapsing inner nodes\n> - taking care when constructing the key (more on this when we\n> integrate with VACUUM)\n> ...and a couple Andres mentioned:\n> - memory management: in\n> https://www.postgresql.org/message-id/flat/20210717194333.mr5io3zup3kxahfm%40alap3.anarazel.de\n> - node dispatch:\n> https://www.postgresql.org/message-id/20210728184139.qhvx6nbwdcvo63m6%40alap3.anarazel.de\n>\n> Therefore, I would suggest that we use SSE2 only, because:\n> - portability is very easy\n> - to avoid a performance hit from indirecting through a function pointer\n\nOkay, I'll try these optimizations and see if the performance becomes better.\n\n>\n> When the PG16 cycle opens, I will work separately on ensuring the\n> portability of using SSE2, so you can focus on other aspects.\n\nThanks!\n\n> I think it would be a good idea to have both node16 and node32 for testing.\n> During benchmarking we can delete one or the other and play with the\n> other thresholds a bit.\n\nI've done benchmark tests while changing the node types. The code base\nis v3 patch that doesn't have the optimization you mentioned below\n(memory management and node dispatch) but I added the code to use SSE2\nfor node-16 and node-32. The 'name' in the below result indicates the\nkind of instruction set (AVX2 or SSE2) and the node type used. 
For\ninstance, sse2_4_32_48_256 means the radix tree has four kinds of node\ntypes for each which have 4, 32, 48, and 256 pointers, respectively,\nand use SSE2 instruction set.\n\n* Case1 - Dense (simulating the case where there are 1000 consecutive\npages each of which has 100 dead tuples, at 100 page intervals.)\nselect prepare(\n1000000, -- max block\n100, -- # of dead tuples per page\n1, -- dead tuples interval within a page\n1000, -- # of consecutive pages having dead tuples\n1100 -- page interval\n);\n\n name size attach\n lookup\n avx2_4_32_128_256 1154 MB 6742.53 ms 47765.63 ms\n avx2_4_32_48_256 1839 MB 4239.35 ms 40528.39 ms\n sse2_4_16_128_256 1154 MB 6994.43 ms 40383.85 ms\n sse2_4_16_32_128_256 1154 MB 7239.35 ms 43542.39 ms\n sse2_4_16_48_256 1839 MB 4404.63 ms 36048.96 ms\n sse2_4_32_128_256 1154 MB 6688.50 ms 44902.64 ms\n\n* Case2 - Sparse (simulating a case where there are pages that have 2\ndead tuples every 1000 pages.)\nselect prepare(\n10000000, -- max block\n2, -- # of dead tuples per page\n50, -- dead tuples interval within a page\n1, -- # of consecutive pages having dead tuples\n1000 -- page interval\n);\n\n name size attach lookup\navx2_4_32_128_256 1535 kB 1.85 ms 17427.42 ms\navx2_4_32_48_256 1472 kB 2.01 ms 22176.75 ms\nsse2_4_16_128_256 1582 kB 2.16 ms 15391.12 ms\nsse2_4_16_32_128_256 1535 kB 2.14 ms 18757.86 ms\nsse2_4_16_48_256 1489 kB 1.91 ms 19210.39 ms\nsse2_4_32_128_256 1535 kB 2.05 ms 17777.55 ms\n\nThe statistics of the number of each node types are:\n\n* avx2_4_32_128_256 (dense and sparse)\n * nkeys = 90910000, height = 3, n4 = 0, n32 = 285, n128 = 916629, n256 = 31\n * nkeys = 20000, height = 3, n4 = 20000, n32 = 48, n128 = 208, n256 = 1\n\n* avx2_4_32_48_256\n * nkeys = 90910000, height = 3, n4 = 0, n32 = 285, n48 = 227, n256 = 916433\n * nkeys = 20000, height = 3, n4 = 20000, n32 = 48, n48 = 159, n256 = 50\n\n* sse2_4_16_128_256\n * nkeys = 90910000, height = 3, n4 = 0, n16 = 0, n128 = 916914, n256 = 31\n * nkeys = 20000, 
height = 3, n4 = 20000, n16 = 0, n128 = 256, n256 = 1\n\n* sse2_4_16_32_128_256\n * nkeys = 90910000, height = 3, n4 = 0, n16 = 0, n32 = 285, n128 =\n916629, n256 = 31\n * nkeys = 20000, height = 3, n4 = 20000, n16 = 0, n32 = 48, n128 =\n208, n256 = 1\n\n* sse2_4_16_48_256\n * nkeys = 90910000, height = 3, n4 = 0, n16 = 0, n48 = 512, n256 = 916433\n * nkeys = 20000, height = 3, n4 = 20000, n16 = 0, n48 = 207, n256 = 50\n\n* sse2_4_32_128_256\n * nkeys = 90910000, height = 3, n4 = 0, n32 = 285, n128 = 916629, n256 = 31\n * nkeys = 20000, height = 3, n4 = 20000, n32 = 48, n128 = 208, n256 = 1\n\nObservations are:\n\nIn both test cases, There is not much difference between using AVX2\nand SSE2. The more mode types, the more time it takes for loading the\ndata (see sse2_4_16_32_128_256).\n\nIn dense case, since most nodes have around 100 children, the radix\ntree that has node-128 had a good figure in terms of memory usage. On\nthe other hand, the radix tree that doesn't have node-128 has a better\nnumber in terms of insertion performance. This is probably because we\nneed to iterate over 'isset' flags from the beginning of the array in\norder to find an empty slot when inserting new data. We do the same\nthing also for node-48 but it was better than node-128 as it's up to\n48.\n\nIn terms of lookup performance, the results vary but I could not find\nany common pattern that makes the performance better or worse. Getting\nmore statistics such as the number of each node type per tree level\nmight help me.\n\n> Ideally, node16 and node32 would have the same code with a different\n> loop count (1 or 2). More generally, there is too much duplication of\n> code (noted by Andres in his PoC), and there are many variable names\n> with the node size embedded. 
This is a bit tricky to make more\n> general, so we don't need to try it yet, but ideally we would have\n> something similar to:\n>\n> switch (node->kind) // todo: inspect tagged pointer\n> {\n> case RADIX_TREE_NODE_KIND_4:\n> idx = node_search_eq(node, chunk, 4);\n> do_action(node, idx, 4, ...);\n> break;\n> case RADIX_TREE_NODE_KIND_32:\n> idx = node_search_eq(node, chunk, 32);\n> do_action(node, idx, 32, ...);\n> ...\n> }\n>\n> static pg_alwaysinline void\n> node_search_eq(radix_tree_node node, uint8 chunk, int16 node_fanout)\n> {\n> if (node_fanout <= SIMPLE_LOOP_THRESHOLD)\n> // do simple loop with (node_simple *) node;\n> else if (node_fanout <= VECTORIZED_LOOP_THRESHOLD)\n> // do vectorized loop where available with (node_vec *) node;\n> ...\n> }\n>\n> ...and let the compiler do loop unrolling and branch removal. Not sure\n> how difficult this is to do, but something to think about.\n\nAgreed.\n\nI'll update my patch based on your review comments and use SSE2.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Tue, 28 Jun 2022 15:24:11 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jun 28, 2022 at 1:24 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I\n> > suspect other optimizations would be worth a lot more than using AVX2:\n> > - collapsing inner nodes\n> > - taking care when constructing the key (more on this when we\n> > integrate with VACUUM)\n> > ...and a couple Andres mentioned:\n> > - memory management: in\n> > https://www.postgresql.org/message-id/flat/20210717194333.mr5io3zup3kxahfm%40alap3.anarazel.de\n> > - node dispatch:\n> > https://www.postgresql.org/message-id/20210728184139.qhvx6nbwdcvo63m6%40alap3.anarazel.de\n> >\n> > Therefore, I would suggest that we use SSE2 only, because:\n> > - portability is very easy\n> > - to avoid a performance hit from 
indirecting through a function pointer\n>\n> Okay, I'll try these optimizations and see if the performance becomes better.\n\nFWIW, I think it's fine if we delay these until after committing a\ngood-enough version. The exception is key construction and I think\nthat deserves some attention now (more on this below).\n\n> I've done benchmark tests while changing the node types. The code base\n> is v3 patch that doesn't have the optimization you mentioned below\n> (memory management and node dispatch) but I added the code to use SSE2\n> for node-16 and node-32.\n\nGreat, this is helpful to visualize what's going on!\n\n> * sse2_4_16_48_256\n> * nkeys = 90910000, height = 3, n4 = 0, n16 = 0, n48 = 512, n256 = 916433\n> * nkeys = 20000, height = 3, n4 = 20000, n16 = 0, n48 = 207, n256 = 50\n>\n> * sse2_4_32_128_256\n> * nkeys = 90910000, height = 3, n4 = 0, n32 = 285, n128 = 916629, n256 = 31\n> * nkeys = 20000, height = 3, n4 = 20000, n32 = 48, n128 = 208, n256 = 1\n\n> Observations are:\n>\n> In both test cases, There is not much difference between using AVX2\n> and SSE2. The more mode types, the more time it takes for loading the\n> data (see sse2_4_16_32_128_256).\n\nGood to know. And as Andres mentioned in his PoC, more node types\nwould be a barrier for pointer tagging, since 32-bit platforms only\nhave two spare bits in the pointer.\n\n> In dense case, since most nodes have around 100 children, the radix\n> tree that has node-128 had a good figure in terms of memory usage. On\n\nLooking at the node stats, and then your benchmark code, I think key\nconstruction is a major influence, maybe more than node type. 
The\nkey/value scheme tested now makes sense:\n\nblockhi || blocklo || 9 bits of item offset\n\n(with the leaf nodes containing a bit map of the lowest few bits of\nthis whole thing)\n\nWe want the lower fanout nodes at the top of the tree and higher\nfanout ones at the bottom.\n\nNote some consequences: If the table has enough columns such that much\nfewer than 100 tuples fit on a page (maybe 30 or 40), then in the\ndense case the nodes above the leaves will have lower fanout (maybe\nthey will fit in a node32). Also, the bitmap values in the leaves will\nbe more empty. In other words, many tables in the wild *resemble* the\nsparse case a bit, even if truly all tuples on the page are dead.\n\nNote also that the dense case in the benchmark above has ~4500 times\nmore keys than the sparse case, and uses about ~1000 times more\nmemory. But the runtime is only 2-3 times longer. That's interesting\nto me.\n\nTo optimize for the sparse case, it seems to me that the key/value would be\n\nblockhi || 9 bits of item offset || blocklo\n\nI believe that would make the leaf nodes more dense, with fewer inner\nnodes, and could drastically speed up the sparse case, and maybe many\nrealistic dense cases. I'm curious to hear your thoughts.\n\n> the other hand, the radix tree that doesn't have node-128 has a better\n> number in terms of insertion performance. This is probably because we\n> need to iterate over 'isset' flags from the beginning of the array in\n> order to find an empty slot when inserting new data. We do the same\n> thing also for node-48 but it was better than node-128 as it's up to\n> 48.\n\nI mentioned in my diff, but for those following along, I think we can\nimprove that by iterating over the bytes and if it's 0xFF all 8 bits\nare set already so keep looking...\n\n> In terms of lookup performance, the results vary but I could not find\n> any common pattern that makes the performance better or worse. 
Getting\n> more statistics such as the number of each node type per tree level\n> might help me.\n\nI think that's a sign that the choice of node types might not be\nterribly important for these two cases. That's good if that's true in\ngeneral -- a future performance-critical use of this code might tweak\nthings for itself without upsetting vacuum.\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 28 Jun 2022 20:09:59 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jun 28, 2022 at 10:10 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Tue, Jun 28, 2022 at 1:24 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > I\n> > > suspect other optimizations would be worth a lot more than using AVX2:\n> > > - collapsing inner nodes\n> > > - taking care when constructing the key (more on this when we\n> > > integrate with VACUUM)\n> > > ...and a couple Andres mentioned:\n> > > - memory management: in\n> > > https://www.postgresql.org/message-id/flat/20210717194333.mr5io3zup3kxahfm%40alap3.anarazel.de\n> > > - node dispatch:\n> > > https://www.postgresql.org/message-id/20210728184139.qhvx6nbwdcvo63m6%40alap3.anarazel.de\n> > >\n> > > Therefore, I would suggest that we use SSE2 only, because:\n> > > - portability is very easy\n> > > - to avoid a performance hit from indirecting through a function pointer\n> >\n> > Okay, I'll try these optimizations and see if the performance becomes better.\n>\n> FWIW, I think it's fine if we delay these until after committing a\n> good-enough version. The exception is key construction and I think\n> that deserves some attention now (more on this below).\n\nAgreed.\n\n>\n> > I've done benchmark tests while changing the node types. 
The code base\n> > is v3 patch that doesn't have the optimization you mentioned below\n> > (memory management and node dispatch) but I added the code to use SSE2\n> > for node-16 and node-32.\n>\n> Great, this is helpful to visualize what's going on!\n>\n> > * sse2_4_16_48_256\n> > * nkeys = 90910000, height = 3, n4 = 0, n16 = 0, n48 = 512, n256 = 916433\n> > * nkeys = 20000, height = 3, n4 = 20000, n16 = 0, n48 = 207, n256 = 50\n> >\n> > * sse2_4_32_128_256\n> > * nkeys = 90910000, height = 3, n4 = 0, n32 = 285, n128 = 916629, n256 = 31\n> > * nkeys = 20000, height = 3, n4 = 20000, n32 = 48, n128 = 208, n256 = 1\n>\n> > Observations are:\n> >\n> > In both test cases, There is not much difference between using AVX2\n> > and SSE2. The more mode types, the more time it takes for loading the\n> > data (see sse2_4_16_32_128_256).\n>\n> Good to know. And as Andres mentioned in his PoC, more node types\n> would be a barrier for pointer tagging, since 32-bit platforms only\n> have two spare bits in the pointer.\n>\n> > In dense case, since most nodes have around 100 children, the radix\n> > tree that has node-128 had a good figure in terms of memory usage. On\n>\n> Looking at the node stats, and then your benchmark code, I think key\n> construction is a major influence, maybe more than node type. The\n> key/value scheme tested now makes sense:\n>\n> blockhi || blocklo || 9 bits of item offset\n>\n> (with the leaf nodes containing a bit map of the lowest few bits of\n> this whole thing)\n>\n> We want the lower fanout nodes at the top of the tree and higher\n> fanout ones at the bottom.\n\nSo more inner nodes can fit in CPU cache, right?\n\n>\n> Note some consequences: If the table has enough columns such that much\n> fewer than 100 tuples fit on a page (maybe 30 or 40), then in the\n> dense case the nodes above the leaves will have lower fanout (maybe\n> they will fit in a node32). Also, the bitmap values in the leaves will\n> be more empty. 
In other words, many tables in the wild *resemble* the\n> sparse case a bit, even if truly all tuples on the page are dead.\n>\n> Note also that the dense case in the benchmark above has ~4500 times\n> more keys than the sparse case, and uses about ~1000 times more\n> memory. But the runtime is only 2-3 times longer. That's interesting\n> to me.\n>\n> To optimize for the sparse case, it seems to me that the key/value would be\n>\n> blockhi || 9 bits of item offset || blocklo\n>\n> I believe that would make the leaf nodes more dense, with fewer inner\n> nodes, and could drastically speed up the sparse case, and maybe many\n> realistic dense cases.\n\nDoes it have an effect on the number of inner nodes?\n\n> I'm curious to hear your thoughts.\n\nThank you for your analysis. It's worth trying. We use 9 bits for item\noffset but most pages don't use all bits in practice. So probably it\nmight be better to move the most significant bit of item offset to the\nleft of blockhi. Or more simply:\n\n9 bits of item offset || blockhi || blocklo\n\n>\n> > the other hand, the radix tree that doesn't have node-128 has a better\n> > number in terms of insertion performance. This is probably because we\n> > need to iterate over 'isset' flags from the beginning of the array in\n> > order to find an empty slot when inserting new data. We do the same\n> > thing also for node-48 but it was better than node-128 as it's up to\n> > 48.\n>\n> I mentioned in my diff, but for those following along, I think we can\n> improve that by iterating over the bytes and if it's 0xFF all 8 bits\n> are set already so keep looking...\n\nRight. Using 0xFF also makes the code readable so I'll change that.\n\n>\n> > In terms of lookup performance, the results vary but I could not find\n> > any common pattern that makes the performance better or worse. 
Getting\n> > more statistics such as the number of each node type per tree level\n> > might help me.\n>\n> I think that's a sign that the choice of node types might not be\n> terribly important for these two cases. That's good if that's true in\n> general -- a future performance-critical use of this code might tweak\n> things for itself without upsetting vacuum.\n\nAgreed.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Mon, 4 Jul 2022 14:07:12 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nI just noticed that I had a reply forgotten in drafts...\n\nOn 2022-05-10 10:51:46 +0900, Masahiko Sawada wrote:\n> To move this project forward, I've implemented radix tree\n> implementation from scratch while studying Andres's implementation. It\n> supports insertion, search, and iteration but not deletion yet. In my\n> implementation, I use Datum as the value so internal and lead nodes\n> have the same data structure, simplifying the implementation. The\n> iteration on the radix tree returns keys with the value in ascending\n> order of the key. 
The patch has regression tests for radix tree but is\n> still in PoC state: left many debugging codes, not supported SSE2 SIMD\n> instructions, added -mavx2 flag is hard-coded.\n\nVery cool - thanks for picking this up.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Mon, 4 Jul 2022 13:55:55 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2022-06-16 13:56:55 +0900, Masahiko Sawada wrote:\n> diff --git a/src/backend/lib/radixtree.c b/src/backend/lib/radixtree.c\n> new file mode 100644\n> index 0000000000..bf87f932fd\n> --- /dev/null\n> +++ b/src/backend/lib/radixtree.c\n> @@ -0,0 +1,1763 @@\n> +/*-------------------------------------------------------------------------\n> + *\n> + * radixtree.c\n> + *\t\tImplementation for adaptive radix tree.\n> + *\n> + * This module employs the idea from the paper \"The Adaptive Radix Tree: ARTful\n> + * Indexing for Main-Memory Databases\" by Viktor Leis, Alfons Kemper, and Thomas\n> + * Neumann, 2013.\n> + *\n> + * There are some differences from the proposed implementation. For instance,\n> + * this radix tree module utilizes AVX2 instruction, enabling us to use 256-bit\n> + * width SIMD vector, whereas 128-bit width SIMD vector is used in the paper.\n> + * Also, there is no support for path compression and lazy path expansion. The\n> + * radix tree supports fixed length of the key so we don't expect the tree level\n> + * wouldn't be high.\n\nI think we're going to need path compression at some point, fwiw. I'd bet on\nit being beneficial even for the tid case.\n\n\n> + * The key is a 64-bit unsigned integer and the value is a Datum.\n\nI don't think it's a good idea to define the value type to be a datum.\n\n\n> +/*\n> + * As we descend a radix tree, we push the node to the stack. 
The stack is used\n> + * at deletion.\n> + */\n> +typedef struct radix_tree_stack_data\n> +{\n> +\tradix_tree_node *node;\n> +\tstruct radix_tree_stack_data *parent;\n> +} radix_tree_stack_data;\n> +typedef radix_tree_stack_data *radix_tree_stack;\n\nI think it's a very bad idea for traversal to need allocations. I really want\nto eventually use this for shared structures (eventually with lock-free\nsearches at least), and needing to do allocations while traversing the tree is\na no-go for that.\n\nParticularly given that the tree currently has a fixed depth, can't you just\nallocate this on the stack once?\n\n> +/*\n> + * Allocate a new node with the given node kind.\n> + */\n> +static radix_tree_node *\n> +radix_tree_alloc_node(radix_tree *tree, radix_tree_node_kind kind)\n> +{\n> +\tradix_tree_node *newnode;\n> +\n> +\tnewnode = (radix_tree_node *) MemoryContextAllocZero(tree->slabs[kind],\n> +\t\t\t\t\t\t\t\t\t\t\t\t\t\t radix_tree_node_info[kind].size);\n> +\tnewnode->kind = kind;\n> +\n> +\t/* update the statistics */\n> +\ttree->mem_used += GetMemoryChunkSpace(newnode);\n> +\ttree->cnt[kind]++;\n> +\n> +\treturn newnode;\n> +}\n\nWhy are you tracking the memory usage at this level of detail? It's *much*\ncheaper to track memory usage via the memory contexts? Since they're dedicated\nfor the radix tree, that ought to be sufficient?\n\n\n> +\t\t\t\t\telse if (idx != n4->n.count)\n> +\t\t\t\t\t{\n> +\t\t\t\t\t\t/*\n> +\t\t\t\t\t\t * the key needs to be inserted in the middle of the\n> +\t\t\t\t\t\t * array, make space for the new key.\n> +\t\t\t\t\t\t */\n> +\t\t\t\t\t\tmemmove(&(n4->chunks[idx + 1]), &(n4->chunks[idx]),\n> +\t\t\t\t\t\t\t\tsizeof(uint8) * (n4->n.count - idx));\n> +\t\t\t\t\t\tmemmove(&(n4->slots[idx + 1]), &(n4->slots[idx]),\n> +\t\t\t\t\t\t\t\tsizeof(radix_tree_node *) * (n4->n.count - idx));\n> +\t\t\t\t\t}\n\nMaybe we could add a static inline helper for these memmoves? 
Both because\nit's repetitive (for different node types) and because the last time I looked\ngcc was generating quite bad code for this. And having to put workarounds into\nmultiple places is obviously worse than having to do it in one place.\n\n\n> +/*\n> + * Insert the key with the val.\n> + *\n> + * found_p is set to true if the key already present, otherwise false, if\n> + * it's not NULL.\n> + *\n> + * XXX: do we need to support update_if_exists behavior?\n> + */\n\nYes, I think that's needed - hence using bfm_set() instead of insert() in the\nprototype.\n\n\n> +void\n> +radix_tree_insert(radix_tree *tree, uint64 key, Datum val, bool *found_p)\n> +{\n> +\tint\t\t\tshift;\n> +\tbool\t\treplaced;\n> +\tradix_tree_node *node;\n> +\tradix_tree_node *parent = tree->root;\n> +\n> +\t/* Empty tree, create the root */\n> +\tif (!tree->root)\n> +\t\tradix_tree_new_root(tree, key, val);\n> +\n> +\t/* Extend the tree if necessary */\n> +\tif (key > tree->max_val)\n> +\t\tradix_tree_extend(tree, key);\n\nFWIW, the reason I used separate functions for these in the prototype is that\nit turns out to generate a lot better code, because it allows non-inlined\nfunction calls to be sibling calls - thereby avoiding the need for a dedicated\nstack frame. That's not possible once you need a palloc or such, so splitting\noff those call paths into dedicated functions is useful.\n\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Mon, 4 Jul 2022 14:18:22 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2022-06-28 15:24:11 +0900, Masahiko Sawada wrote:\n> In both test cases, There is not much difference between using AVX2\n> and SSE2. 
The more mode types, the more time it takes for loading the\n> data (see sse2_4_16_32_128_256).\n\nYea, at some point the compiler starts using a jump table instead of branches,\nand that turns out to be a good bit more expensive. And even with branches, it\nobviously adds hard to predict branches. IIRC I fought a bit with the compiler\nto avoid some of that cost, it's possible that got \"lost\" in Sawada-san's\npatch.\n\n\nSawada-san, what led you to discard the 1 and 16 node types? IIRC the 1 node\none is not unimportant until we have path compression.\n\nRight now the node struct sizes are:\n4 - 48 bytes\n32 - 296 bytes\n128 - 1304 bytes\n256 - 2088 bytes\n\nI guess radix_tree_node_128->isset is just 16 bytes compared to 1288 other\nbytes, but needing that separate isset array somehow is sad :/. I wonder if a\nsmaller \"free index\" would do the trick? Point to the element + 1 where we\nsearched last and start a plain loop there. Particularly in an insert-only\nworkload that'll always work, and in other cases it'll still often work I\nthink.\n\n\nOne thing I was wondering about is trying to choose node types in\nroughly-power-of-two struct sizes. It's pretty easy to end up with significant\nfragmentation in the slabs right now when inserting as you go, because some of\nthe smaller node types will be freed but not enough to actually free blocks of\nmemory. If we instead have ~power-of-two sizes we could just use a single slab\nof the max size, and carve out the smaller node types out of that largest\nallocation.\n\nBtw, that fragmentation is another reason why I think it's better to track\nmemory usage via memory contexts, rather than doing so based on\nGetMemoryChunkSpace().\n\n\n> > Ideally, node16 and node32 would have the same code with a different\n> > loop count (1 or 2). More generally, there is too much duplication of\n> > code (noted by Andres in his PoC), and there are many variable names\n> > with the node size embedded. 
This is a bit tricky to make more\n> > general, so we don't need to try it yet, but ideally we would have\n> > something similar to:\n> >\n> > switch (node->kind) // todo: inspect tagged pointer\n> > {\n> > case RADIX_TREE_NODE_KIND_4:\n> > idx = node_search_eq(node, chunk, 4);\n> > do_action(node, idx, 4, ...);\n> > break;\n> > case RADIX_TREE_NODE_KIND_32:\n> > idx = node_search_eq(node, chunk, 32);\n> > do_action(node, idx, 32, ...);\n> > ...\n> > }\n\nFWIW, that should be doable with an inline function, if you pass it the memory\nto the \"array\" rather than the node directly. Not so sure it's a good idea to\ndo dispatch between node types / search methods inside the helper, as you\nsuggest below:\n\n\n> > static pg_alwaysinline void\n> > node_search_eq(radix_tree_node node, uint8 chunk, int16 node_fanout)\n> > {\n> > if (node_fanout <= SIMPLE_LOOP_THRESHOLD)\n> > // do simple loop with (node_simple *) node;\n> > else if (node_fanout <= VECTORIZED_LOOP_THRESHOLD)\n> > // do vectorized loop where available with (node_vec *) node;\n> > ...\n> > }\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Mon, 4 Jul 2022 15:00:38 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jul 4, 2022 at 2:07 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Tue, Jun 28, 2022 at 10:10 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Tue, Jun 28, 2022 at 1:24 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > > I\n> > > > suspect other optimizations would be worth a lot more than using AVX2:\n> > > > - collapsing inner nodes\n> > > > - taking care when constructing the key (more on this when we\n> > > > integrate with VACUUM)\n> > > > ...and a couple Andres mentioned:\n> > > > - memory management: in\n> > > > https://www.postgresql.org/message-id/flat/20210717194333.mr5io3zup3kxahfm%40alap3.anarazel.de\n> > > > - 
node dispatch:\n> > > > https://www.postgresql.org/message-id/20210728184139.qhvx6nbwdcvo63m6%40alap3.anarazel.de\n> > > >\n> > > > Therefore, I would suggest that we use SSE2 only, because:\n> > > > - portability is very easy\n> > > > - to avoid a performance hit from indirecting through a function pointer\n> > >\n> > > Okay, I'll try these optimizations and see if the performance becomes better.\n> >\n> > FWIW, I think it's fine if we delay these until after committing a\n> > good-enough version. The exception is key construction and I think\n> > that deserves some attention now (more on this below).\n>\n> Agreed.\n>\n> >\n> > > I've done benchmark tests while changing the node types. The code base\n> > > is v3 patch that doesn't have the optimization you mentioned below\n> > > (memory management and node dispatch) but I added the code to use SSE2\n> > > for node-16 and node-32.\n> >\n> > Great, this is helpful to visualize what's going on!\n> >\n> > > * sse2_4_16_48_256\n> > > * nkeys = 90910000, height = 3, n4 = 0, n16 = 0, n48 = 512, n256 = 916433\n> > > * nkeys = 20000, height = 3, n4 = 20000, n16 = 0, n48 = 207, n256 = 50\n> > >\n> > > * sse2_4_32_128_256\n> > > * nkeys = 90910000, height = 3, n4 = 0, n32 = 285, n128 = 916629, n256 = 31\n> > > * nkeys = 20000, height = 3, n4 = 20000, n32 = 48, n128 = 208, n256 = 1\n> >\n> > > Observations are:\n> > >\n> > > In both test cases, There is not much difference between using AVX2\n> > > and SSE2. The more mode types, the more time it takes for loading the\n> > > data (see sse2_4_16_32_128_256).\n> >\n> > Good to know. And as Andres mentioned in his PoC, more node types\n> > would be a barrier for pointer tagging, since 32-bit platforms only\n> > have two spare bits in the pointer.\n> >\n> > > In dense case, since most nodes have around 100 children, the radix\n> > > tree that has node-128 had a good figure in terms of memory usage. 
On\n> >\n> > Looking at the node stats, and then your benchmark code, I think key\n> > construction is a major influence, maybe more than node type. The\n> > key/value scheme tested now makes sense:\n> >\n> > blockhi || blocklo || 9 bits of item offset\n> >\n> > (with the leaf nodes containing a bit map of the lowest few bits of\n> > this whole thing)\n> >\n> > We want the lower fanout nodes at the top of the tree and higher\n> > fanout ones at the bottom.\n>\n> So more inner nodes can fit in CPU cache, right?\n>\n> >\n> > Note some consequences: If the table has enough columns such that much\n> > fewer than 100 tuples fit on a page (maybe 30 or 40), then in the\n> > dense case the nodes above the leaves will have lower fanout (maybe\n> > they will fit in a node32). Also, the bitmap values in the leaves will\n> > be more empty. In other words, many tables in the wild *resemble* the\n> > sparse case a bit, even if truly all tuples on the page are dead.\n> >\n> > Note also that the dense case in the benchmark above has ~4500 times\n> > more keys than the sparse case, and uses about ~1000 times more\n> > memory. But the runtime is only 2-3 times longer. That's interesting\n> > to me.\n> >\n> > To optimize for the sparse case, it seems to me that the key/value would be\n> >\n> > blockhi || 9 bits of item offset || blocklo\n> >\n> > I believe that would make the leaf nodes more dense, with fewer inner\n> > nodes, and could drastically speed up the sparse case, and maybe many\n> > realistic dense cases.\n>\n> Does it have an effect on the number of inner nodes?\n>\n> > I'm curious to hear your thoughts.\n>\n> Thank you for your analysis. It's worth trying. We use 9 bits for item\n> offset but most pages don't use all bits in practice. So probably it\n> might be better to move the most significant bit of item offset to the\n> left of blockhi. 
Or more simply:\n>\n> 9 bits of item offset || blockhi || blocklo\n>\n> >\n> > > the other hand, the radix tree that doesn't have node-128 has a better\n> > > number in terms of insertion performance. This is probably because we\n> > > need to iterate over 'isset' flags from the beginning of the array in\n> > > order to find an empty slot when inserting new data. We do the same\n> > > thing also for node-48 but it was better than node-128 as it's up to\n> > > 48.\n> >\n> > I mentioned in my diff, but for those following along, I think we can\n> > improve that by iterating over the bytes and if it's 0xFF all 8 bits\n> > are set already so keep looking...\n>\n> Right. Using 0xFF also makes the code readable so I'll change that.\n>\n> >\n> > > In terms of lookup performance, the results vary but I could not find\n> > > any common pattern that makes the performance better or worse. Getting\n> > > more statistics such as the number of each node type per tree level\n> > > might help me.\n> >\n> > I think that's a sign that the choice of node types might not be\n> > terribly important for these two cases. That's good if that's true in\n> > general -- a future performance-critical use of this code might tweak\n> > things for itself without upsetting vacuum.\n>\n> Agreed.\n>\n\nI've attached an updated patch that incorporated comments from John.\nHere are some comments I could not address and the reason:\n\n+// bitfield is uint32, so we don't need UINT64_C\n bitfield &= ((UINT64_C(1) << node->n.count) - 1);\n\nSince node->n.count could be 32, I think we need to use UINT64CONST() here.\n\n /* Macros for radix tree nodes */\n+// not sure why are we doing casts here?\n #define IS_LEAF_NODE(n) (((radix_tree_node *) (n))->shift == 0)\n #define IS_EMPTY_NODE(n) (((radix_tree_node *) (n))->count == 0)\n\nI've left the casts as I use IS_LEAF_NODE for rt_node_4/16/32/128/256.\n\nAlso, I've dropped the configure script support for AVX2, and support\nfor SSE2 is missing. 
I'll update it later.\n\nI've not addressed the comments I got from Andres yet so I'll update\nthe patch according to the discussion but the current patch would be\nmore readable than the previous one thanks to the comments from John.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/", "msg_date": "Tue, 5 Jul 2022 12:30:30 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jul 5, 2022 at 6:18 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2022-06-16 13:56:55 +0900, Masahiko Sawada wrote:\n> > diff --git a/src/backend/lib/radixtree.c b/src/backend/lib/radixtree.c\n> > new file mode 100644\n> > index 0000000000..bf87f932fd\n> > --- /dev/null\n> > +++ b/src/backend/lib/radixtree.c\n> > @@ -0,0 +1,1763 @@\n> > +/*-------------------------------------------------------------------------\n> > + *\n> > + * radixtree.c\n> > + * Implementation for adaptive radix tree.\n> > + *\n> > + * This module employs the idea from the paper \"The Adaptive Radix Tree: ARTful\n> > + * Indexing for Main-Memory Databases\" by Viktor Leis, Alfons Kemper, and Thomas\n> > + * Neumann, 2013.\n> > + *\n> > + * There are some differences from the proposed implementation. For instance,\n> > + * this radix tree module utilizes AVX2 instruction, enabling us to use 256-bit\n> > + * width SIMD vector, whereas 128-bit width SIMD vector is used in the paper.\n> > + * Also, there is no support for path compression and lazy path expansion. The\n> > + * radix tree supports fixed length of the key so we don't expect the tree level\n> > + * wouldn't be high.\n>\n> I think we're going to need path compression at some point, fwiw. 
I'd bet on\n> it being beneficial even for the tid case.\n>\n>\n> > + * The key is a 64-bit unsigned integer and the value is a Datum.\n>\n> I don't think it's a good idea to define the value type to be a datum.\n\nA datum value is convenient to represent both a pointer and a value so\nI used it to avoid defining node types for inner and leaf nodes\nseparately. Since a datum could be 4 bytes or 8 bytes depending it\nmight not be good for some platforms. But what kind of aspects do you\nnot like the idea of using datum?\n\n>\n>\n> > +/*\n> > + * As we descend a radix tree, we push the node to the stack. The stack is used\n> > + * at deletion.\n> > + */\n> > +typedef struct radix_tree_stack_data\n> > +{\n> > + radix_tree_node *node;\n> > + struct radix_tree_stack_data *parent;\n> > +} radix_tree_stack_data;\n> > +typedef radix_tree_stack_data *radix_tree_stack;\n>\n> I think it's a very bad idea for traversal to need allocations. I really want\n> to eventually use this for shared structures (eventually with lock-free\n> searches at least), and needing to do allocations while traversing the tree is\n> a no-go for that.\n>\n> Particularly given that the tree currently has a fixed depth, can't you just\n> allocate this on the stack once?\n\nYes, we can do that.\n\n>\n> > +/*\n> > + * Allocate a new node with the given node kind.\n> > + */\n> > +static radix_tree_node *\n> > +radix_tree_alloc_node(radix_tree *tree, radix_tree_node_kind kind)\n> > +{\n> > + radix_tree_node *newnode;\n> > +\n> > + newnode = (radix_tree_node *) MemoryContextAllocZero(tree->slabs[kind],\n> > + radix_tree_node_info[kind].size);\n> > + newnode->kind = kind;\n> > +\n> > + /* update the statistics */\n> > + tree->mem_used += GetMemoryChunkSpace(newnode);\n> > + tree->cnt[kind]++;\n> > +\n> > + return newnode;\n> > +}\n>\n> Why are you tracking the memory usage at this level of detail? It's *much*\n> cheaper to track memory usage via the memory contexts? 
Since they're dedicated\n> for the radix tree, that ought to be sufficient?\n\nIndeed. I'll use MemoryContextMemAllocated instead.\n\n>\n>\n> > + else if (idx != n4->n.count)\n> > + {\n> > + /*\n> > + * the key needs to be inserted in the middle of the\n> > + * array, make space for the new key.\n> > + */\n> > + memmove(&(n4->chunks[idx + 1]), &(n4->chunks[idx]),\n> > + sizeof(uint8) * (n4->n.count - idx));\n> > + memmove(&(n4->slots[idx + 1]), &(n4->slots[idx]),\n> > + sizeof(radix_tree_node *) * (n4->n.count - idx));\n> > + }\n>\n> Maybe we could add a static inline helper for these memmoves? Both because\n> it's repetitive (for different node types) and because the last time I looked\n> gcc was generating quite bad code for this. And having to put workarounds into\n> multiple places is obviously worse than having to do it in one place.\n\nAgreed, I'll update it.\n\n>\n>\n> > +/*\n> > + * Insert the key with the val.\n> > + *\n> > + * found_p is set to true if the key already present, otherwise false, if\n> > + * it's not NULL.\n> > + *\n> > + * XXX: do we need to support update_if_exists behavior?\n> > + */\n>\n> Yes, I think that's needed - hence using bfm_set() instead of insert() in the\n> prototype.\n\nAgreed.\n\n>\n>\n> > +void\n> > +radix_tree_insert(radix_tree *tree, uint64 key, Datum val, bool *found_p)\n> > +{\n> > + int shift;\n> > + bool replaced;\n> > + radix_tree_node *node;\n> > + radix_tree_node *parent = tree->root;\n> > +\n> > + /* Empty tree, create the root */\n> > + if (!tree->root)\n> > + radix_tree_new_root(tree, key, val);\n> > +\n> > + /* Extend the tree if necessary */\n> > + if (key > tree->max_val)\n> > + radix_tree_extend(tree, key);\n>\n> FWIW, the reason I used separate functions for these in the prototype is that\n> it turns out to generate a lot better code, because it allows non-inlined\n> function calls to be sibling calls - thereby avoiding the need for a dedicated\n> stack frame. 
That's not possible once you need a palloc or such, so splitting\n> off those call paths into dedicated functions is useful.\n\nThank you for the info. How much does using sibling call optimization\nhelp the performance in this case? I think that these two cases are\nused only a limited number of times: inserting the first key and\nextending the tree.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Tue, 5 Jul 2022 16:33:17 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jul 5, 2022 at 7:00 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2022-06-28 15:24:11 +0900, Masahiko Sawada wrote:\n> > In both test cases, There is not much difference between using AVX2\n> > and SSE2. The more mode types, the more time it takes for loading the\n> > data (see sse2_4_16_32_128_256).\n>\n> Yea, at some point the compiler starts using a jump table instead of branches,\n> and that turns out to be a good bit more expensive. And even with branches, it\n> obviously adds hard to predict branches. IIRC I fought a bit with the compiler\n> to avoid some of that cost, it's possible that got \"lost\" in Sawada-san's\n> patch.\n>\n>\n> Sawada-san, what led you to discard the 1 and 16 node types? IIRC the 1 node\n> one is not unimportant until we have path compression.\n\nI wanted to start with a smaller number of node types for simplicity.\n16 node type has been added to v4 patch I submitted[1]. I think it's\ntrade-off between better memory and the overhead of growing (and\nshrinking) the node type. 
I'm going to add more node types once we\nturn out based on the benchmark that it's beneficial.\n\n>\n> Right now the node struct sizes are:\n> 4 - 48 bytes\n> 32 - 296 bytes\n> 128 - 1304 bytes\n> 256 - 2088 bytes\n>\n> I guess radix_tree_node_128->isset is just 16 bytes compared to 1288 other\n> bytes, but needing that separate isset array somehow is sad :/. I wonder if a\n> smaller \"free index\" would do the trick? Point to the element + 1 where we\n> searched last and start a plain loop there. Particularly in an insert-only\n> workload that'll always work, and in other cases it'll still often work I\n> think.\n\nradix_tree_node_128->isset is used to distinguish between null-pointer\nin inner nodes and 0 in leaf nodes. So I guess we can have a flag to\nindicate a leaf or an inner so that we can interpret (Datum) 0 as\neither null-pointer or 0. Or if we define different data types for\ninner and leaf nodes probably we don't need it.\n\n\n> One thing I was wondering about is trying to choose node types in\n> roughly-power-of-two struct sizes. It's pretty easy to end up with significant\n> fragmentation in the slabs right now when inserting as you go, because some of\n> the smaller node types will be freed but not enough to actually free blocks of\n> memory. If we instead have ~power-of-two sizes we could just use a single slab\n> of the max size, and carve out the smaller node types out of that largest\n> allocation.\n\nYou meant to manage memory allocation (and free) for smaller node\ntypes by ourselves?\n\nHow about using different block size for different node types?\n\n>\n> Btw, that fragmentation is another reason why I think it's better to track\n> memory usage via memory contexts, rather than doing so based on\n> GetMemoryChunkSpace().\n\nAgreed.\n\n>\n>\n> > > Ideally, node16 and node32 would have the same code with a different\n> > > loop count (1 or 2). 
More generally, there is too much duplication of\n> > > code (noted by Andres in his PoC), and there are many variable names\n> > > with the node size embedded. This is a bit tricky to make more\n> > > general, so we don't need to try it yet, but ideally we would have\n> > > something similar to:\n> > >\n> > > switch (node->kind) // todo: inspect tagged pointer\n> > > {\n> > > case RADIX_TREE_NODE_KIND_4:\n> > > idx = node_search_eq(node, chunk, 4);\n> > > do_action(node, idx, 4, ...);\n> > > break;\n> > > case RADIX_TREE_NODE_KIND_32:\n> > > idx = node_search_eq(node, chunk, 32);\n> > > do_action(node, idx, 32, ...);\n> > > ...\n> > > }\n>\n> FWIW, that should be doable with an inline function, if you pass it the memory\n> to the \"array\" rather than the node directly. Not so sure it's a good idea to\n> do dispatch between node types / search methods inside the helper, as you\n> suggest below:\n>\n>\n> > > static pg_alwaysinline void\n> > > node_search_eq(radix_tree_node node, uint8 chunk, int16 node_fanout)\n> > > {\n> > > if (node_fanout <= SIMPLE_LOOP_THRESHOLD)\n> > > // do simple loop with (node_simple *) node;\n> > > else if (node_fanout <= VECTORIZED_LOOP_THRESHOLD)\n> > > // do vectorized loop where available with (node_vec *) node;\n> > > ...\n> > > }\n\nYeah, It's worth trying at some point.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Tue, 5 Jul 2022 16:33:29 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2022-07-05 16:33:17 +0900, Masahiko Sawada wrote:\n> On Tue, Jul 5, 2022 at 6:18 AM Andres Freund <andres@anarazel.de> wrote:\n> A datum value is convenient to represent both a pointer and a value so\n> I used it to avoid defining node types for inner and leaf nodes\n> separately.\n\nI'm not convinced that's a good goal. 
I think we're going to want to have\ndifferent key and value types, and trying to unify leaf and inner nodes is\ngoing to make that impossible.\n\nConsider e.g. using it for something like a buffer mapping table - your key\nmight be way too wide to fit it sensibly into 64bit.\n\n\n> Since a datum could be 4 bytes or 8 bytes depending it might not be good for\n> some platforms.\n\nRight - thats another good reason why it's problematic. A lot of key types\naren't going to be 4/8 bytes dependent on 32/64bit, but either / or.\n\n\n> > > +void\n> > > +radix_tree_insert(radix_tree *tree, uint64 key, Datum val, bool *found_p)\n> > > +{\n> > > + int shift;\n> > > + bool replaced;\n> > > + radix_tree_node *node;\n> > > + radix_tree_node *parent = tree->root;\n> > > +\n> > > + /* Empty tree, create the root */\n> > > + if (!tree->root)\n> > > + radix_tree_new_root(tree, key, val);\n> > > +\n> > > + /* Extend the tree if necessary */\n> > > + if (key > tree->max_val)\n> > > + radix_tree_extend(tree, key);\n> >\n> > FWIW, the reason I used separate functions for these in the prototype is that\n> > it turns out to generate a lot better code, because it allows non-inlined\n> > function calls to be sibling calls - thereby avoiding the need for a dedicated\n> > stack frame. That's not possible once you need a palloc or such, so splitting\n> > off those call paths into dedicated functions is useful.\n> \n> Thank you for the info. How much does using sibling call optimization\n> help the performance in this case? 
I think that these two cases are\n> used only a limited number of times: inserting the first key and\n> extending the tree.\n\nIt's not that it helps in the cases moved into separate functions - it's that\nnot having that code in the \"normal\" paths keeps the normal path faster.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Tue, 5 Jul 2022 01:09:23 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2022-07-05 16:33:29 +0900, Masahiko Sawada wrote:\n> > One thing I was wondering about is trying to choose node types in\n> > roughly-power-of-two struct sizes. It's pretty easy to end up with significant\n> > fragmentation in the slabs right now when inserting as you go, because some of\n> > the smaller node types will be freed but not enough to actually free blocks of\n> > memory. If we instead have ~power-of-two sizes we could just use a single slab\n> > of the max size, and carve out the smaller node types out of that largest\n> > allocation.\n> \n> You meant to manage memory allocation (and free) for smaller node\n> types by ourselves?\n\nFor all of them basically. Using a single slab allocator and then subdividing\nthe \"common block size\" into however many chunks that fit into a single node\ntype.\n\n> How about using different block size for different node types?\n\nNot following...\n\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Tue, 5 Jul 2022 01:11:26 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jul 4, 2022 at 12:07 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> > Looking at the node stats, and then your benchmark code, I think key\n> > construction is a major influence, maybe more than node type. 
The\n> > key/value scheme tested now makes sense:\n> >\n> > blockhi || blocklo || 9 bits of item offset\n> >\n> > (with the leaf nodes containing a bit map of the lowest few bits of\n> > this whole thing)\n> >\n> > We want the lower fanout nodes at the top of the tree and higher\n> > fanout ones at the bottom.\n>\n> So more inner nodes can fit in CPU cache, right?\n\nMy thinking is, on average, there will be more dense space utilization\nin the leaf bitmaps, and fewer inner nodes. I'm not quite sure about\ncache, since with my idea a search might have to visit more nodes to\nget the common negative result (indexed tid not found in vacuum's\nlist).\n\n> > Note some consequences: If the table has enough columns such that much\n> > fewer than 100 tuples fit on a page (maybe 30 or 40), then in the\n> > dense case the nodes above the leaves will have lower fanout (maybe\n> > they will fit in a node32). Also, the bitmap values in the leaves will\n> > be more empty. In other words, many tables in the wild *resemble* the\n> > sparse case a bit, even if truly all tuples on the page are dead.\n> >\n> > Note also that the dense case in the benchmark above has ~4500 times\n> > more keys than the sparse case, and uses about ~1000 times more\n> > memory. But the runtime is only 2-3 times longer. That's interesting\n> > to me.\n> >\n> > To optimize for the sparse case, it seems to me that the key/value would be\n> >\n> > blockhi || 9 bits of item offset || blocklo\n> >\n> > I believe that would make the leaf nodes more dense, with fewer inner\n> > nodes, and could drastically speed up the sparse case, and maybe many\n> > realistic dense cases.\n>\n> Does it have an effect on the number of inner nodes?\n>\n> > I'm curious to hear your thoughts.\n>\n> Thank you for your analysis. It's worth trying. We use 9 bits for item\n> offset but most pages don't use all bits in practice. 
So probably it\n> might be better to move the most significant bit of item offset to the\n> left of blockhi. Or more simply:\n>\n> 9 bits of item offset || blockhi || blocklo\n\nA concern here is most tids won't use many bits in blockhi either,\nmost often far fewer, so this would make the tree higher, I think.\nEach value of blockhi represents 0.5GB of heap (32TB max). Even with\nvery large tables I'm guessing most pages of interest to vacuum are\nconcentrated in a few of these 0.5GB \"segments\".\n\nAnd it's possible path compression would change the tradeoffs here.\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 5 Jul 2022 15:49:14 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jul 5, 2022 at 5:09 PM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2022-07-05 16:33:17 +0900, Masahiko Sawada wrote:\n> > On Tue, Jul 5, 2022 at 6:18 AM Andres Freund <andres@anarazel.de> wrote:\n> > A datum value is convenient to represent both a pointer and a value so\n> > I used it to avoid defining node types for inner and leaf nodes\n> > separately.\n>\n> I'm not convinced that's a good goal. I think we're going to want to have\n> different key and value types, and trying to unify leaf and inner nodes is\n> going to make that impossible.\n>\n> Consider e.g. using it for something like a buffer mapping table - your key\n> might be way too wide to fit it sensibly into 64bit.\n\nRight. It seems to be better to have an interface so that the user of\nthe radix tree can specify the arbitrary key size (and perhaps value\nsize too?) on creation. And we can have separate leaf node types that\nhave values instead of pointers. 
If the value size is less than\npointer size, we can have values within leaf nodes but if it’s bigger\nprobably the leaf node can have pointers to memory where to store the\nvalue.\n\n>\n>\n> > Since a datum could be 4 bytes or 8 bytes depending it might not be good for\n> > some platforms.\n>\n> Right - thats another good reason why it's problematic. A lot of key types\n> aren't going to be 4/8 bytes dependent on 32/64bit, but either / or.\n>\n>\n> > > > +void\n> > > > +radix_tree_insert(radix_tree *tree, uint64 key, Datum val, bool *found_p)\n> > > > +{\n> > > > + int shift;\n> > > > + bool replaced;\n> > > > + radix_tree_node *node;\n> > > > + radix_tree_node *parent = tree->root;\n> > > > +\n> > > > + /* Empty tree, create the root */\n> > > > + if (!tree->root)\n> > > > + radix_tree_new_root(tree, key, val);\n> > > > +\n> > > > + /* Extend the tree if necessary */\n> > > > + if (key > tree->max_val)\n> > > > + radix_tree_extend(tree, key);\n> > >\n> > > FWIW, the reason I used separate functions for these in the prototype is that\n> > > it turns out to generate a lot better code, because it allows non-inlined\n> > > function calls to be sibling calls - thereby avoiding the need for a dedicated\n> > > stack frame. That's not possible once you need a palloc or such, so splitting\n> > > off those call paths into dedicated functions is useful.\n> >\n> > Thank you for the info. How much does using sibling call optimization\n> > help the performance in this case? 
I think that these two cases are\n> > used only a limited number of times: inserting the first key and\n> > extending the tree.\n>\n> It's not that it helps in the cases moved into separate functions - it's that\n> not having that code in the \"normal\" paths keeps the normal path faster.\n\nThanks, understood.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Wed, 6 Jul 2022 22:43:09 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jul 5, 2022 at 5:49 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Jul 4, 2022 at 12:07 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > Looking at the node stats, and then your benchmark code, I think key\n> > > construction is a major influence, maybe more than node type. The\n> > > key/value scheme tested now makes sense:\n> > >\n> > > blockhi || blocklo || 9 bits of item offset\n> > >\n> > > (with the leaf nodes containing a bit map of the lowest few bits of\n> > > this whole thing)\n> > >\n> > > We want the lower fanout nodes at the top of the tree and higher\n> > > fanout ones at the bottom.\n> >\n> > So more inner nodes can fit in CPU cache, right?\n>\n> My thinking is, on average, there will be more dense space utilization\n> in the leaf bitmaps, and fewer inner nodes. I'm not quite sure about\n> cache, since with my idea a search might have to visit more nodes to\n> get the common negative result (indexed tid not found in vacuum's\n> list).\n>\n> > > Note some consequences: If the table has enough columns such that much\n> > > fewer than 100 tuples fit on a page (maybe 30 or 40), then in the\n> > > dense case the nodes above the leaves will have lower fanout (maybe\n> > > they will fit in a node32). Also, the bitmap values in the leaves will\n> > > be more empty. 
In other words, many tables in the wild *resemble* the\n> > > sparse case a bit, even if truly all tuples on the page are dead.\n> > >\n> > > Note also that the dense case in the benchmark above has ~4500 times\n> > > more keys than the sparse case, and uses about ~1000 times more\n> > > memory. But the runtime is only 2-3 times longer. That's interesting\n> > > to me.\n> > >\n> > > To optimize for the sparse case, it seems to me that the key/value would be\n> > >\n> > > blockhi || 9 bits of item offset || blocklo\n> > >\n> > > I believe that would make the leaf nodes more dense, with fewer inner\n> > > nodes, and could drastically speed up the sparse case, and maybe many\n> > > realistic dense cases.\n> >\n> > Does it have an effect on the number of inner nodes?\n> >\n> > > I'm curious to hear your thoughts.\n> >\n> > Thank you for your analysis. It's worth trying. We use 9 bits for item\n> > offset but most pages don't use all bits in practice. So probably it\n> > might be better to move the most significant bit of item offset to the\n> > left of blockhi. Or more simply:\n> >\n> > 9 bits of item offset || blockhi || blocklo\n>\n> A concern here is most tids won't use many bits in blockhi either,\n> most often far fewer, so this would make the tree higher, I think.\n> Each value of blockhi represents 0.5GB of heap (32TB max). Even with\n> very large tables I'm guessing most pages of interest to vacuum are\n> concentrated in a few of these 0.5GB \"segments\".\n\nRight.\n\nI guess that the tree height is affected by where garbages are, right?\nFor example, even if all garbage in the table is concentrated in\n0.5GB, if they exist between 2^17 and 2^18 block, we use the first\nbyte of blockhi. If the table is larger than 128GB, the second byte of\nthe blockhi could be used depending on where the garbage exists.\n\nAnother variation of how to store TID would be that we use the block\nnumber as a key and store a bitmap of the offset as a value. 
We can\nuse Bitmapset for example, or an approach like Roaring bitmap.\n\nI think that at this stage it's better to define the design first. For\nexample, key size and value size, and these sizes are fixed or can be\nset the arbitary size? Given the use case of buffer mapping, we would\nneed a wider key to store RelFileNode, ForkNumber, and BlockNumber. On\nthe other hand, limiting the key size is 64 bit integer makes the\nlogic simple, and possibly it could still be used in buffer mapping\ncases by using a tree of a tree. For value size, if we support\ndifferent value sizes specified by the user, we can either embed\nmultiple values in the leaf node (called Multi-value leaves in ART\npaper) or introduce a leaf node that stores one value (called\nSingle-value leaves).\n\n> And it's possible path compression would change the tradeoffs here.\n\nAgreed.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Fri, 8 Jul 2022 11:09:44 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jul 8, 2022 at 9:10 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> I guess that the tree height is affected by where garbages are, right?\n> For example, even if all garbage in the table is concentrated in\n> 0.5GB, if they exist between 2^17 and 2^18 block, we use the first\n> byte of blockhi. If the table is larger than 128GB, the second byte of\n> the blockhi could be used depending on where the garbage exists.\n\nRight.\n\n> Another variation of how to store TID would be that we use the block\n> number as a key and store a bitmap of the offset as a value. We can\n> use Bitmapset for example,\n\nI like the idea of using existing code to set/check a bitmap if it's\nconvenient. 
But (in case that was implied here) I'd really like to\nstay away from variable-length values, which would require\n\"Single-value leaves\" (slow). I also think it's fine to treat the\nkey/value as just bits, and not care where exactly they came from, as\nwe've been talking about.\n\n> or an approach like Roaring bitmap.\n\nThis would require two new data structures instead of one. That\ndoesn't seem like a path to success.\n\n> I think that at this stage it's better to define the design first. For\n> example, key size and value size, and these sizes are fixed or can be\n> set the arbitary size?\n\nI don't think we need to start over. Andres' prototype had certain\ndesign decisions built in for the intended use case (although maybe\nnot clearly documented as such). Subsequent patches in this thread\nsubstantially changed many design aspects. If there were any changes\nthat made things wonderful for vacuum, it wasn't explained, but Andres\ndid explain how some of these changes were not good for other uses.\nGoing to fixed 64-bit keys and values should still allow many future\napplications, so let's do that if there's no reason not to.\n\n> For value size, if we support\n> different value sizes specified by the user, we can either embed\n> multiple values in the leaf node (called Multi-value leaves in ART\n> paper)\n\nI don't think \"Multi-value leaves\" allow for variable-length values,\nFWIW. And now I see I also used this term wrong in my earlier review\ncomment -- v3/4 don't actually use \"multi-value leaves\", but Andres'\ndoes (going by the multiple leaf types). From the paper: \"Multi-value\nleaves: The values are stored in one of four different leaf node\ntypes, which mirror the structure of inner nodes, but contain values\ninstead of pointers.\"\n\n(It seems v3/v4 could be called a variation of \"Combined pointer/value\nslots: If values fit into pointers, no separate node types are\nnecessary. 
Instead, each pointer storage location in an inner node can\neither store a pointer or a value.\" But without the advantage of\nvariable length keys).\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 8 Jul 2022 13:43:32 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jul 8, 2022 at 3:43 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Fri, Jul 8, 2022 at 9:10 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I guess that the tree height is affected by where garbages are, right?\n> > For example, even if all garbage in the table is concentrated in\n> > 0.5GB, if they exist between 2^17 and 2^18 block, we use the first\n> > byte of blockhi. If the table is larger than 128GB, the second byte of\n> > the blockhi could be used depending on where the garbage exists.\n>\n> Right.\n>\n> > Another variation of how to store TID would be that we use the block\n> > number as a key and store a bitmap of the offset as a value. We can\n> > use Bitmapset for example,\n>\n> I like the idea of using existing code to set/check a bitmap if it's\n> convenient. But (in case that was implied here) I'd really like to\n> stay away from variable-length values, which would require\n> \"Single-value leaves\" (slow). I also think it's fine to treat the\n> key/value as just bits, and not care where exactly they came from, as\n> we've been talking about.\n>\n> > or an approach like Roaring bitmap.\n>\n> This would require two new data structures instead of one. That\n> doesn't seem like a path to success.\n\nAgreed.\n\n>\n> > I think that at this stage it's better to define the design first. For\n> > example, key size and value size, and these sizes are fixed or can be\n> > set the arbitary size?\n>\n> I don't think we need to start over. 
Andres' prototype had certain\n> design decisions built in for the intended use case (although maybe\n> not clearly documented as such). Subsequent patches in this thread\n> substantially changed many design aspects. If there were any changes\n> that made things wonderful for vacuum, it wasn't explained, but Andres\n> did explain how some of these changes were not good for other uses.\n> Going to fixed 64-bit keys and values should still allow many future\n> applications, so let's do that if there's no reason not to.\n\nI thought Andres pointed out that given that we store BufferTag (or\npart of that) into the key, the fixed 64-bit keys might not be enough\nfor buffer mapping use cases. If we want to use wider keys more than\n64-bit, we would need to consider it.\n\n>\n> > For value size, if we support\n> > different value sizes specified by the user, we can either embed\n> > multiple values in the leaf node (called Multi-value leaves in ART\n> > paper)\n>\n> I don't think \"Multi-value leaves\" allow for variable-length values,\n> FWIW. And now I see I also used this term wrong in my earlier review\n> comment -- v3/4 don't actually use \"multi-value leaves\", but Andres'\n> does (going by the multiple leaf types). From the paper: \"Multi-value\n> leaves: The values are stored in one of four different leaf node\n> types, which mirror the structure of inner nodes, but contain values\n> instead of pointers.\"\n\nRight, but sorry I meant the user specifies the arbitrary fixed-size\nvalue length on creation like we do in dynahash.c.\n\n>\n> (It seems v3/v4 could be called a variation of \"Combined pointer/value\n> slots: If values fit into pointers, no separate node types are\n> necessary. 
Instead, each pointer storage location in an inner node can\n> either store a pointer or a value.\" But without the advantage of\n> variable length keys).\n\nAgreed.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Tue, 12 Jul 2022 10:16:21 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jul 12, 2022 at 8:16 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> > > I think that at this stage it's better to define the design first. For\n> > > example, key size and value size, and these sizes are fixed or can be\n> > > set the arbitary size?\n> >\n> > I don't think we need to start over. Andres' prototype had certain\n> > design decisions built in for the intended use case (although maybe\n> > not clearly documented as such). Subsequent patches in this thread\n> > substantially changed many design aspects. If there were any changes\n> > that made things wonderful for vacuum, it wasn't explained, but Andres\n> > did explain how some of these changes were not good for other uses.\n> > Going to fixed 64-bit keys and values should still allow many future\n> > applications, so let's do that if there's no reason not to.\n>\n> I thought Andres pointed out that given that we store BufferTag (or\n> part of that) into the key, the fixed 64-bit keys might not be enough\n> for buffer mapping use cases. If we want to use wider keys more than\n> 64-bit, we would need to consider it.\n\nIt sounds like you've answered your own question, then. If so, I'm\ncurious what your current thinking is.\n\nIf we *did* want to have maximum flexibility, then \"single-value\nleaves\" method would be the way to go, since it seems to be the\neasiest way to have variable-length both keys and values. 
I do have a\nconcern that the extra pointer traversal would be a drag on\nperformance, and also require lots of small memory allocations. If we\nhappened to go that route, your idea upthread of using a bitmapset of\nitem offsets in the leaves sounds like a good fit for that.\n\nI also have some concerns about also simultaneously trying to design\nfor the use for buffer mappings. I certainly want to make this good\nfor as many future uses as possible, and I'd really like to preserve\nany optimizations already fought for. However, to make concrete\nprogress on the thread subject, I also don't think it's the most\nproductive use of time to get tied up about the fine details of\nsomething that will not likely happen for several years at the\nearliest.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Thu, 14 Jul 2022 11:16:55 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 14, 2022 at 1:17 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Tue, Jul 12, 2022 at 8:16 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > > I think that at this stage it's better to define the design first. For\n> > > > example, key size and value size, and these sizes are fixed or can be\n> > > > set the arbitary size?\n> > >\n> > > I don't think we need to start over. Andres' prototype had certain\n> > > design decisions built in for the intended use case (although maybe\n> > > not clearly documented as such). Subsequent patches in this thread\n> > > substantially changed many design aspects. 
If there were any changes\n> > > that made things wonderful for vacuum, it wasn't explained, but Andres\n> > > did explain how some of these changes were not good for other uses.\n> > > Going to fixed 64-bit keys and values should still allow many future\n> > > applications, so let's do that if there's no reason not to.\n> >\n> > I thought Andres pointed out that given that we store BufferTag (or\n> > part of that) into the key, the fixed 64-bit keys might not be enough\n> > for buffer mapping use cases. If we want to use wider keys more than\n> > 64-bit, we would need to consider it.\n>\n> It sounds like you've answered your own question, then. If so, I'm\n> curious what your current thinking is.\n>\n> If we *did* want to have maximum flexibility, then \"single-value\n> leaves\" method would be the way to go, since it seems to be the\n> easiest way to have variable-length both keys and values. I do have a\n> concern that the extra pointer traversal would be a drag on\n> performance, and also require lots of small memory allocations.\n\nAgreed.\n\n> I also have some concerns about also simultaneously trying to design\n> for the use for buffer mappings. I certainly want to make this good\n> for as many future uses as possible, and I'd really like to preserve\n> any optimizations already fought for. However, to make concrete\n> progress on the thread subject, I also don't think it's the most\n> productive use of time to get tied up about the fine details of\n> something that will not likely happen for several years at the\n> earliest.\n\nI’d like to keep the first version simple. We can improve it and add\nmore optimizations later. Using radix tree for vacuum TID storage\nwould still be a big win comparing to using a flat array, even without\nall these optimizations. In terms of single-value leaves method, I'm\nalso concerned about an extra pointer traversal and extra memory\nallocation. 
It's most flexible but multi-value leaves method is also\nflexible enough for many use cases. Using the single-value method\nseems to be too much as the first step for me.\n\nOverall, using 64-bit keys and 64-bit values would be a reasonable\nchoice for me as the first step . It can cover wider use cases\nincluding vacuum TID use cases. And possibly it can cover use cases by\ncombining a hash table or using tree of tree, for example.\n\nRegards,\n\n-- \nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Tue, 19 Jul 2022 11:10:42 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2022-07-08 11:09:44 +0900, Masahiko Sawada wrote:\n> I think that at this stage it's better to define the design first. For\n> example, key size and value size, and these sizes are fixed or can be\n> set the arbitary size? Given the use case of buffer mapping, we would\n> need a wider key to store RelFileNode, ForkNumber, and BlockNumber. On\n> the other hand, limiting the key size is 64 bit integer makes the\n> logic simple, and possibly it could still be used in buffer mapping\n> cases by using a tree of a tree. 
For value size, if we support\n> different value sizes specified by the user, we can either embed\n> multiple values in the leaf node (called Multi-value leaves in ART\n> paper) or introduce a leaf node that stores one value (called\n> Single-value leaves).\n\nFWIW, I think the best path forward would be to do something similar to the\nsimplehash.h approach, so it can be customized to the specific user.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Mon, 18 Jul 2022 19:24:36 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jul 19, 2022 at 9:24 AM Andres Freund <andres@anarazel.de> wrote:\n> FWIW, I think the best path forward would be to do something similar to\nthe\n> simplehash.h approach, so it can be customized to the specific user.\n\nI figured that would come up at some point. It may be worth doing in the\nfuture, but I think it's way too much to ask for the first use case.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Tue, Jul 19, 2022 at 9:24 AM Andres Freund <andres@anarazel.de> wrote:> FWIW, I think the best path forward would be to do something similar to the> simplehash.h approach, so it can be customized to the specific user.I figured that would come up at some point. 
It may be worth doing in the future, but I think it's way too much to ask for the first use case.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 19 Jul 2022 11:10:33 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jul 18, 2022 at 9:10 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n> On Tue, Jul 19, 2022 at 9:24 AM Andres Freund <andres@anarazel.de> wrote:\n> > FWIW, I think the best path forward would be to do something similar to the\n> > simplehash.h approach, so it can be customized to the specific user.\n>\n> I figured that would come up at some point. It may be worth doing in the future, but I think it's way too much to ask for the first use case.\n\nI have a prototype patch that creates a read-only snapshot of the\nvisibility map, and has vacuumlazy.c work off of that when determining\nwith pages to skip. The patch also gets rid of the\nSKIP_PAGES_THRESHOLD stuff. This is very effective with TPC-C,\nprincipally because it really cuts down on the number of scanned_pages\nthat are scanned only because the VM bit is unset concurrently by DML.\nThe window for this is very large when the table is large (and\nnaturally takes a long time to scan), resulting in many more \"dead but\nnot yet removable\" tuples being encountered than necessary. Which\nitself causes bogus information in the FSM -- information about the\nspace that VACUUM could free from the page, which is often highly\nmisleading.\n\nThere are remaining questions about how to do this properly. Right now\nI'm just copying pages from the VM into local memory, right after\nOldestXmin is first acquired -- we \"lock in\" a snapshot of the VM at\nthe earliest opportunity, which is what lazy_scan_skip() actually\nworks off now. 
There needs to be some consideration given to the\nresource management aspects of this -- it needs to use memory\nsensibly, which the current prototype patch doesn't do at all. I'm\nprobably going to seriously pursue this as a project soon, and will\nprobably need some kind of data structure for the local copy. The raw\npages are usually quite space inefficient, considering we only need an\nimmutable snapshot of the VM.\n\nI wonder if it makes sense to use this as part of this project. It\nwill be possible to know the exact heap pages that will become\nscanned_pages before scanning even one page with this design (perhaps\nwith caveats about low memory conditions). It could also be very\neffective as a way of speeding up TID lookups in the reasonably common\ncase where most scanned_pages don't have any LP_DEAD items -- just\nlook it up in our local/materialized copy of the VM first. But even\nwhen LP_DEAD items are spread fairly evenly, it could still give us\nreliable information about the distribution of LP_DEAD items very\nearly on.\n\nMaybe the two data structures could even be combined in some way? You\ncan use more memory for the local copy of the VM if you know that you\nwon't need the memory for dead_items. It's kinda the same problem, in\na way.\n\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Mon, 18 Jul 2022 21:29:17 -0700", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jul 19, 2022 at 9:11 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> I’d like to keep the first version simple. We can improve it and add\n> more optimizations later. Using radix tree for vacuum TID storage\n> would still be a big win comparing to using a flat array, even without\n> all these optimizations. In terms of single-value leaves method, I'm\n> also concerned about an extra pointer traversal and extra memory\n> allocation. 
It's most flexible but multi-value leaves method is also\n> flexible enough for many use cases. Using the single-value method\n> seems to be too much as the first step for me.\n>\n> Overall, using 64-bit keys and 64-bit values would be a reasonable\n> choice for me as the first step . It can cover wider use cases\n> including vacuum TID use cases. And possibly it can cover use cases by\n> combining a hash table or using tree of tree, for example.\n\nThese two aspects would also bring it closer to Andres' prototype, which 1)\nmakes review easier and 2) easier to preserve optimization work already\ndone, so +1 from me.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Tue, Jul 19, 2022 at 9:11 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> I’d like to keep the first version simple. We can improve it and add> more optimizations later. Using radix tree for vacuum TID storage> would still be a big win comparing to using a flat array, even without> all these optimizations. In terms of single-value leaves method, I'm> also concerned about an extra pointer traversal and extra memory> allocation. It's most flexible but multi-value leaves method is also> flexible enough for many use cases. Using the single-value method> seems to be too much as the first step for me.>> Overall, using 64-bit keys and 64-bit values would be a reasonable> choice for me as the first step . It can cover wider use cases> including vacuum TID use cases. 
And possibly it can cover use cases by> combining a hash table or using tree of tree, for example.These two aspects would also bring it closer to Andres' prototype, which 1) makes review easier and 2) easier to preserve optimization work already done, so +1 from me.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 19 Jul 2022 11:30:24 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jul 19, 2022 at 1:30 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n>\n> On Tue, Jul 19, 2022 at 9:11 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I’d like to keep the first version simple. We can improve it and add\n> > more optimizations later. Using radix tree for vacuum TID storage\n> > would still be a big win comparing to using a flat array, even without\n> > all these optimizations. In terms of single-value leaves method, I'm\n> > also concerned about an extra pointer traversal and extra memory\n> > allocation. It's most flexible but multi-value leaves method is also\n> > flexible enough for many use cases. Using the single-value method\n> > seems to be too much as the first step for me.\n> >\n> > Overall, using 64-bit keys and 64-bit values would be a reasonable\n> > choice for me as the first step . It can cover wider use cases\n> > including vacuum TID use cases. And possibly it can cover use cases by\n> > combining a hash table or using tree of tree, for example.\n>\n> These two aspects would also bring it closer to Andres' prototype, which 1) makes review easier and 2) easier to preserve optimization work already done, so +1 from me.\n\nThanks.\n\nI've updated the patch. It now implements 64-bit keys, 64-bit values,\nand the multi-value leaves method. 
I've tried to remove duplicated\ncodes but we might find a better way to do that.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/", "msg_date": "Fri, 22 Jul 2022 10:43:09 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jul 22, 2022 at 10:43 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Tue, Jul 19, 2022 at 1:30 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> >\n> > On Tue, Jul 19, 2022 at 9:11 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > I’d like to keep the first version simple. We can improve it and add\n> > > more optimizations later. Using radix tree for vacuum TID storage\n> > > would still be a big win comparing to using a flat array, even without\n> > > all these optimizations. In terms of single-value leaves method, I'm\n> > > also concerned about an extra pointer traversal and extra memory\n> > > allocation. It's most flexible but multi-value leaves method is also\n> > > flexible enough for many use cases. Using the single-value method\n> > > seems to be too much as the first step for me.\n> > >\n> > > Overall, using 64-bit keys and 64-bit values would be a reasonable\n> > > choice for me as the first step . It can cover wider use cases\n> > > including vacuum TID use cases. And possibly it can cover use cases by\n> > > combining a hash table or using tree of tree, for example.\n> >\n> > These two aspects would also bring it closer to Andres' prototype, which 1) makes review easier and 2) easier to preserve optimization work already done, so +1 from me.\n>\n> Thanks.\n>\n> I've updated the patch. It now implements 64-bit keys, 64-bit values,\n> and the multi-value leaves method. 
I've tried to remove duplicated\n> codes but we might find a better way to do that.\n>\n\nWith the recent changes related to simd, I'm going to split the patch\ninto at least two parts: introduce other simd optimized functions used\nby the radix tree and the radix tree implementation. Particularly we\nneed two functions for radix tree: a function like pg_lfind32 but for\n8 bits integers and return the index, and a function that returns the\nindex of the first element that is >= key.\n\nRegards,\n\n--\nMasahiko Sawada\nEDB: https://www.enterprisedb.com/\n\n\n", "msg_date": "Mon, 15 Aug 2022 14:38:29 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Aug 15, 2022 at 12:39 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Jul 22, 2022 at 10:43 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Tue, Jul 19, 2022 at 1:30 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > >\n> > >\n> > > On Tue, Jul 19, 2022 at 9:11 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > > I’d like to keep the first version simple. We can improve it and add\n> > > > more optimizations later. Using radix tree for vacuum TID storage\n> > > > would still be a big win comparing to using a flat array, even without\n> > > > all these optimizations. In terms of single-value leaves method, I'm\n> > > > also concerned about an extra pointer traversal and extra memory\n> > > > allocation. It's most flexible but multi-value leaves method is also\n> > > > flexible enough for many use cases. Using the single-value method\n> > > > seems to be too much as the first step for me.\n> > > >\n> > > > Overall, using 64-bit keys and 64-bit values would be a reasonable\n> > > > choice for me as the first step . It can cover wider use cases\n> > > > including vacuum TID use cases. 
And possibly it can cover use cases by\n> > > > combining a hash table or using tree of tree, for example.\n> > >\n> > > These two aspects would also bring it closer to Andres' prototype, which 1) makes review easier and 2) easier to preserve optimization work already done, so +1 from me.\n> >\n> > Thanks.\n> >\n> > I've updated the patch. It now implements 64-bit keys, 64-bit values,\n> > and the multi-value leaves method. I've tried to remove duplicated\n> > codes but we might find a better way to do that.\n> >\n>\n> With the recent changes related to simd, I'm going to split the patch\n> into at least two parts: introduce other simd optimized functions used\n> by the radix tree and the radix tree implementation. Particularly we\n> need two functions for radix tree: a function like pg_lfind32 but for\n> 8 bits integers and return the index, and a function that returns the\n> index of the first element that is >= key.\n\nI recommend looking at\n\nhttps://www.postgresql.org/message-id/CAFBsxsESLUyJ5spfOSyPrOvKUEYYNqsBosue9SV1j8ecgNXSKA%40mail.gmail.com\n\nsince I did the work just now for searching bytes and returning a\nbool, buth = and <=. Should be pretty close. 
Also, i believe if you\nleft this for last as a possible refactoring, it might save some work.\nIn any case, I'll take a look at the latest patch next month.\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 15 Aug 2022 20:39:27 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Aug 15, 2022 at 10:39 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Aug 15, 2022 at 12:39 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Jul 22, 2022 at 10:43 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Tue, Jul 19, 2022 at 1:30 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> > > >\n> > > >\n> > > >\n> > > > On Tue, Jul 19, 2022 at 9:11 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > >\n> > > > > I’d like to keep the first version simple. We can improve it and add\n> > > > > more optimizations later. Using radix tree for vacuum TID storage\n> > > > > would still be a big win comparing to using a flat array, even without\n> > > > > all these optimizations. In terms of single-value leaves method, I'm\n> > > > > also concerned about an extra pointer traversal and extra memory\n> > > > > allocation. It's most flexible but multi-value leaves method is also\n> > > > > flexible enough for many use cases. Using the single-value method\n> > > > > seems to be too much as the first step for me.\n> > > > >\n> > > > > Overall, using 64-bit keys and 64-bit values would be a reasonable\n> > > > > choice for me as the first step . It can cover wider use cases\n> > > > > including vacuum TID use cases. 
And possibly it can cover use cases by\n> > > > > combining a hash table or using tree of tree, for example.\n> > > >\n> > > > These two aspects would also bring it closer to Andres' prototype, which 1) makes review easier and 2) easier to preserve optimization work already done, so +1 from me.\n> > >\n> > > Thanks.\n> > >\n> > > I've updated the patch. It now implements 64-bit keys, 64-bit values,\n> > > and the multi-value leaves method. I've tried to remove duplicated\n> > > codes but we might find a better way to do that.\n> > >\n> >\n> > With the recent changes related to simd, I'm going to split the patch\n> > into at least two parts: introduce other simd optimized functions used\n> > by the radix tree and the radix tree implementation. Particularly we\n> > need two functions for radix tree: a function like pg_lfind32 but for\n> > 8 bits integers and return the index, and a function that returns the\n> > index of the first element that is >= key.\n>\n> I recommend looking at\n>\n> https://www.postgresql.org/message-id/CAFBsxsESLUyJ5spfOSyPrOvKUEYYNqsBosue9SV1j8ecgNXSKA%40mail.gmail.com\n>\n> since I did the work just now for searching bytes and returning a\n> bool, buth = and <=. Should be pretty close. Also, i believe if you\n> left this for last as a possible refactoring, it might save some work.\n> In any case, I'll take a look at the latest patch next month.\n\nI've updated the radix tree patch. It's now separated into two patches.\n\n0001 patch introduces pg_lsearch8() and pg_lsearch8_ge() (we may find\nbetter names) that are similar to the pg_lfind8() family but they\nreturn the index of the key in the vector instead of true/false. The\npatch includes regression tests.\n\n0002 patch is the main radix tree implementation. I've removed some\nduplicated codes of node manipulation. 
For instance, since node-4,\nnode-16, and node-32 have a similar structure with different fanouts,\nI introduced the common function for them.\n\nIn addition to two patches, I've attached the third patch. It's not\npart of radix tree implementation but introduces a contrib module\nbench_radix_tree, a tool for radix tree performance benchmarking. It\nmeasures loading and lookup performance of both the radix tree and a\nflat array.\n\nRegards,\n\n--\nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Fri, 16 Sep 2022 15:00:31 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Sep 16, 2022 at 1:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Aug 15, 2022 at 10:39 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > bool, buth = and <=. Should be pretty close. Also, i believe if you\n> > left this for last as a possible refactoring, it might save some work.\n\nv6 demonstrates why this should have been put off towards the end. (more below)\n\n> > In any case, I'll take a look at the latest patch next month.\n\nSince the CF entry said \"Needs Review\", I began looking at v5 again\nthis week. Hopefully not too much has changed, but in the future I\nstrongly recommend setting to \"Waiting on Author\" if a new version is\nforthcoming. I realize many here share updated patches at any time,\nbut I'd like to discourage the practice especially for large patches.\n\n> I've updated the radix tree patch. It's now separated into two patches.\n>\n> 0001 patch introduces pg_lsearch8() and pg_lsearch8_ge() (we may find\n> better names) that are similar to the pg_lfind8() family but they\n> return the index of the key in the vector instead of true/false. 
The\n> patch includes regression tests.\n\nI don't want to do a full review of this just yet, but I'll just point\nout some problems from a quick glance.\n\n+/*\n+ * Return the index of the first element in the vector that is greater than\n+ * or eual to the given scalar. Return sizeof(Vector8) if there is no such\n+ * element.\n\nThat's a bizarre API to indicate non-existence.\n\n+ *\n+ * Note that this function assumes the elements in the vector are sorted.\n+ */\n\nThat is *completely* unacceptable for a general-purpose function.\n\n+#else /* USE_NO_SIMD */\n+ Vector8 r = 0;\n+ uint8 *rp = (uint8 *) &r;\n+\n+ for (Size i = 0; i < sizeof(Vector8); i++)\n+ rp[i] = (((const uint8 *) &v1)[i] == ((const uint8 *) &v2)[i]) ? 0xFF : 0;\n\nI don't think we should try to force the non-simd case to adopt the\nspecial semantics of vector comparisons. It's much easier to just use\nthe same logic as the assert builds.\n\n+#ifdef USE_SSE2\n+ return (uint32) _mm_movemask_epi8(v);\n+#elif defined(USE_NEON)\n+ static const uint8 mask[16] = {\n+ 1 << 0, 1 << 1, 1 << 2, 1 << 3,\n+ 1 << 4, 1 << 5, 1 << 6, 1 << 7,\n+ 1 << 0, 1 << 1, 1 << 2, 1 << 3,\n+ 1 << 4, 1 << 5, 1 << 6, 1 << 7,\n+ };\n+\n+ uint8x16_t masked = vandq_u8(vld1q_u8(mask), (uint8x16_t)\nvshrq_n_s8(v, 7));\n+ uint8x16_t maskedhi = vextq_u8(masked, masked, 8);\n+\n+ return (uint32) vaddvq_u16((uint16x8_t) vzip1q_u8(masked, maskedhi));\n\nFor Arm, we need to be careful here. This article goes into a lot of\ndetail for this situation:\n\nhttps://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon\n\nHere again, I'd rather put this off and focus on getting the \"large\ndetails\" in good enough shape so we can got towards integrating with\nvacuum.\n\n> In addition to two patches, I've attached the third patch. 
It's not\n> part of radix tree implementation but introduces a contrib module\n> bench_radix_tree, a tool for radix tree performance benchmarking. It\n> measures loading and lookup performance of both the radix tree and a\n> flat array.\n\nExcellent! This was high on my wish list.\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Fri, 16 Sep 2022 14:54:14 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Sep 16, 2022 at 02:54:14PM +0700, John Naylor wrote:\n> Here again, I'd rather put this off and focus on getting the \"large\n> details\" in good enough shape so we can got towards integrating with\n> vacuum.\n\nI started a new thread for the SIMD patch [0] so that this thread can\nremain focused on the radix tree stuff.\n\n[0] https://www.postgresql.org/message-id/20220917052903.GA3172400%40nathanxps13\n\n-- \nNathan Bossart\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Sat, 17 Sep 2022 14:42:10 -0700", "msg_from": "Nathan Bossart <nathandbossart@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Sep 16, 2022 at 4:54 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Fri, Sep 16, 2022 at 1:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Aug 15, 2022 at 10:39 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > bool, buth = and <=. Should be pretty close. Also, i believe if you\n> > > left this for last as a possible refactoring, it might save some work.\n>\n> v6 demonstrates why this should have been put off towards the end. (more below)\n>\n> > > In any case, I'll take a look at the latest patch next month.\n>\n> Since the CF entry said \"Needs Review\", I began looking at v5 again\n> this week. 
Hopefully not too much has changed, but in the future I\n> strongly recommend setting to \"Waiting on Author\" if a new version is\n> forthcoming. I realize many here share updated patches at any time,\n> but I'd like to discourage the practice especially for large patches.\n\nUnderstood. Sorry for the inconveniences.\n\n>\n> > I've updated the radix tree patch. It's now separated into two patches.\n> >\n> > 0001 patch introduces pg_lsearch8() and pg_lsearch8_ge() (we may find\n> > better names) that are similar to the pg_lfind8() family but they\n> > return the index of the key in the vector instead of true/false. The\n> > patch includes regression tests.\n>\n> I don't want to do a full review of this just yet, but I'll just point\n> out some problems from a quick glance.\n>\n> +/*\n> + * Return the index of the first element in the vector that is greater than\n> + * or eual to the given scalar. Return sizeof(Vector8) if there is no such\n> + * element.\n>\n> That's a bizarre API to indicate non-existence.\n>\n> + *\n> + * Note that this function assumes the elements in the vector are sorted.\n> + */\n>\n> That is *completely* unacceptable for a general-purpose function.\n>\n> +#else /* USE_NO_SIMD */\n> + Vector8 r = 0;\n> + uint8 *rp = (uint8 *) &r;\n> +\n> + for (Size i = 0; i < sizeof(Vector8); i++)\n> + rp[i] = (((const uint8 *) &v1)[i] == ((const uint8 *) &v2)[i]) ? 0xFF : 0;\n>\n> I don't think we should try to force the non-simd case to adopt the\n> special semantics of vector comparisons. 
It's much easier to just use\n> the same logic as the assert builds.\n>\n> +#ifdef USE_SSE2\n> + return (uint32) _mm_movemask_epi8(v);\n> +#elif defined(USE_NEON)\n> + static const uint8 mask[16] = {\n> + 1 << 0, 1 << 1, 1 << 2, 1 << 3,\n> + 1 << 4, 1 << 5, 1 << 6, 1 << 7,\n> + 1 << 0, 1 << 1, 1 << 2, 1 << 3,\n> + 1 << 4, 1 << 5, 1 << 6, 1 << 7,\n> + };\n> +\n> + uint8x16_t masked = vandq_u8(vld1q_u8(mask), (uint8x16_t)\n> vshrq_n_s8(v, 7));\n> + uint8x16_t maskedhi = vextq_u8(masked, masked, 8);\n> +\n> + return (uint32) vaddvq_u16((uint16x8_t) vzip1q_u8(masked, maskedhi));\n>\n> For Arm, we need to be careful here. This article goes into a lot of\n> detail for this situation:\n>\n> https://community.arm.com/arm-community-blogs/b/infrastructure-solutions-blog/posts/porting-x86-vector-bitmask-optimizations-to-arm-neon\n>\n> Here again, I'd rather put this off and focus on getting the \"large\n> details\" in good enough shape so we can got towards integrating with\n> vacuum.\n\nThank you for the comments! These above comments are addressed by\nNathan in a newly derived thread. I'll work on the patch.\n\nI'll consider how to integrate with vacuum as the next step. One\nconcern for me is how to limit the memory usage to\nmaintenance_work_mem. Unlike using a flat array, memory space for\nadding one TID varies depending on the situation. 
If we want strictly\nnot to allow using memory more than maintenance_work_mem, probably we\nneed to estimate the memory consumption in a conservative way.\n\n\nRegards,\n\n--\nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 20 Sep 2022 17:19:11 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Sep 20, 2022 at 3:19 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Fri, Sep 16, 2022 at 4:54 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> > Here again, I'd rather put this off and focus on getting the \"large\n> > details\" in good enough shape so we can got towards integrating with\n> > vacuum.\n>\n> Thank you for the comments! These above comments are addressed by\n> Nathan in a newly derived thread. I'll work on the patch.\n\nI still seem to be out-voted on when to tackle this particular\noptimization, so I've extended the v6 benchmark code with a hackish\nfunction that populates a fixed number of keys, but with different fanouts.\n(diff attached as a text file)\n\nI didn't take particular care to make this scientific, but the following\nseems pretty reproducible. 
Note what happens to load and search performance\nwhen node16 has 15 entries versus 16:\n\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+--------+------------------+------------+--------------\n 15 | 327680 | 3776512 | 39 | 20\n(1 row)\nnum_keys = 327680, height = 4, n4 = 1, n16 = 23408, n32 = 0, n128 = 0, n256\n= 0\n\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+--------+------------------+------------+--------------\n 16 | 327680 | 3514368 | 25 | 11\n(1 row)\nnum_keys = 327680, height = 4, n4 = 0, n16 = 21846, n32 = 0, n128 = 0, n256\n= 0\n\nIn trying to wrap the SIMD code behind layers of abstraction, the latest\npatch (and Nathan's cleanup) threw it away in almost all cases. To explain,\nwe need to talk about how vectorized code deals with the \"tail\" that is too\nsmall for the register:\n\n1. Use a one-by-one algorithm, like we do for the pg_lfind* variants.\n2. Read some junk into the register and mask off false positives from the\nresult.\n\nThere are advantages to both depending on the situation.\n\nPatch v5 and earlier used #2. Patch v6 used #1, so if a node16 has 15\nelements or less, it will iterate over them one-by-one exactly like a\nnode4. Only when full with 16 will the vector path be taken. When another\nentry is added, the elements are copied to the next bigger node, so there's\na *small* window where it's fast.\n\nIn short, this code needs to be lower level so that we still have full\ncontrol while being portable. I will work on this, and also the related\ncode for node dispatch.\n\nSince v6 has some good infrastructure to do low-level benchmarking, I also\nwant to do some experiments with memory management.\n\n(I have further comments about the code, but I will put that off until\nlater)\n\n> I'll consider how to integrate with vacuum as the next step. One\n> concern for me is how to limit the memory usage to\n> maintenance_work_mem. 
Unlike using a flat array, memory space for\n> adding one TID varies depending on the situation. If we want strictly\n> not to allow using memory more than maintenance_work_mem, probably we\n> need to estimate the memory consumption in a conservative way.\n\n+1\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 21 Sep 2022 13:17:21 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Sep 21, 2022 at 01:17:21PM +0700, John Naylor wrote:\n> In trying to wrap the SIMD code behind layers of abstraction, the latest\n> patch (and Nathan's cleanup) threw it away in almost all cases. To explain,\n> we need to talk about how vectorized code deals with the \"tail\" that is too\n> small for the register:\n> \n> 1. Use a one-by-one algorithm, like we do for the pg_lfind* variants.\n> 2. Read some junk into the register and mask off false positives from the\n> result.\n> \n> There are advantages to both depending on the situation.\n> \n> Patch v5 and earlier used #2. Patch v6 used #1, so if a node16 has 15\n> elements or less, it will iterate over them one-by-one exactly like a\n> node4. Only when full with 16 will the vector path be taken. When another\n> entry is added, the elements are copied to the next bigger node, so there's\n> a *small* window where it's fast.\n> \n> In short, this code needs to be lower level so that we still have full\n> control while being portable. I will work on this, and also the related\n> code for node dispatch.\n\nIs it possible to use approach #2 here, too? AFAICT space is allocated for\nall of the chunks, so there wouldn't be any danger in searching all them\nand discarding any results >= node->count. 
Granted, we're depending on the\nnumber of chunks always being a multiple of elements-per-vector in order to\navoid the tail path, but that seems like a reasonably safe assumption that\ncan be covered with comments.\n\n-- \nNathan Bossart\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 21 Sep 2022 11:01:26 -0700", "msg_from": "Nathan Bossart <nathandbossart@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Sep 22, 2022 at 1:01 AM Nathan Bossart <nathandbossart@gmail.com>\nwrote:\n>\n> On Wed, Sep 21, 2022 at 01:17:21PM +0700, John Naylor wrote:\n>\n> > In short, this code needs to be lower level so that we still have full\n> > control while being portable. I will work on this, and also the related\n> > code for node dispatch.\n>\n> Is it possible to use approach #2 here, too? AFAICT space is allocated\nfor\n> all of the chunks, so there wouldn't be any danger in searching all them\n> and discarding any results >= node->count.\n\nSure, the caller could pass the maximum node capacity, and then check if\nthe returned index is within the range of the node count.\n\n> Granted, we're depending on the\n> number of chunks always being a multiple of elements-per-vector in order\nto\n> avoid the tail path, but that seems like a reasonably safe assumption that\n> can be covered with comments.\n\nActually, we don't need to depend on that at all. When I said \"junk\" above,\nthat can be any bytes, as long as we're not reading off the end of\nallocated memory. We'll never do that here, since the child pointers/values\nfollow. In that case, the caller can hard-code the size (it would even\nhappen to work now to multiply rt_node_kind by 16, to be sneaky). One thing\nI want to try soon is storing fewer than 16/32 etc entries, so that the\nwhole node fits comfortably inside a power-of-two allocation. 
That would\nallow us to use aset without wasting space for the smaller nodes, which\nwould be faster and possibly would solve the fragmentation problem Andres\nreferred to in\n\nhttps://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.de\n\nWhile on the subject, I wonder how important it is to keep the chunks in\nthe small nodes in sorted order. That adds branches and memmove calls, and\nis the whole reason for the recent \"pg_lfind_ge\" function.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Sep 22, 2022 at 1:01 AM Nathan Bossart <nathandbossart@gmail.com> wrote:>> On Wed, Sep 21, 2022 at 01:17:21PM +0700, John Naylor wrote:>> > In short, this code needs to be lower level so that we still have full> > control while being portable. I will work on this, and also the related> > code for node dispatch.>> Is it possible to use approach #2 here, too?  AFAICT space is allocated for> all of the chunks, so there wouldn't be any danger in searching all them> and discarding any results >= node->count.Sure, the caller could pass the maximum node capacity, and then check if the returned index is within the range of the node count.> Granted, we're depending on the> number of chunks always being a multiple of elements-per-vector in order to> avoid the tail path, but that seems like a reasonably safe assumption that> can be covered with comments.Actually, we don't need to depend on that at all. When I said \"junk\" above, that can be any bytes, as long as we're not reading off the end of allocated memory. We'll never do that here, since the child pointers/values follow. In that case, the caller can hard-code the  size (it would even happen to work now to multiply rt_node_kind by 16, to be sneaky). One thing I want to try soon is storing fewer than 16/32 etc entries, so that the whole node fits comfortably inside a power-of-two allocation. 
That would allow us to use aset without wasting space for the smaller nodes, which would be faster and possibly would solve the fragmentation problem Andres referred to inhttps://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.deWhile on the subject, I wonder how important it is to keep the chunks in the small nodes in sorted order. That adds branches and memmove calls, and is the whole reason for the recent \"pg_lfind_ge\" function.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 22 Sep 2022 11:46:24 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Sep 22, 2022 at 1:46 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Thu, Sep 22, 2022 at 1:01 AM Nathan Bossart <nathandbossart@gmail.com> wrote:\n> >\n> > On Wed, Sep 21, 2022 at 01:17:21PM +0700, John Naylor wrote:\n> >\n> > > In short, this code needs to be lower level so that we still have full\n> > > control while being portable. I will work on this, and also the related\n> > > code for node dispatch.\n> >\n> > Is it possible to use approach #2 here, too? AFAICT space is allocated for\n> > all of the chunks, so there wouldn't be any danger in searching all them\n> > and discarding any results >= node->count.\n>\n> Sure, the caller could pass the maximum node capacity, and then check if the returned index is within the range of the node count.\n>\n> > Granted, we're depending on the\n> > number of chunks always being a multiple of elements-per-vector in order to\n> > avoid the tail path, but that seems like a reasonably safe assumption that\n> > can be covered with comments.\n>\n> Actually, we don't need to depend on that at all. When I said \"junk\" above, that can be any bytes, as long as we're not reading off the end of allocated memory. We'll never do that here, since the child pointers/values follow. 
In that case, the caller can hard-code the size (it would even happen to work now to multiply rt_node_kind by 16, to be sneaky). One thing I want to try soon is storing fewer than 16/32 etc entries, so that the whole node fits comfortably inside a power-of-two allocation. That would allow us to use aset without wasting space for the smaller nodes, which would be faster and possibly would solve the fragmentation problem Andres referred to in\n>\n> https://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.de\n>\n> While on the subject, I wonder how important it is to keep the chunks in the small nodes in sorted order. That adds branches and memmove calls, and is the whole reason for the recent \"pg_lfind_ge\" function.\n\nGood point. While keeping the chunks in the small nodes in sorted\norder is useful for visiting all keys in sorted order, additional\nbranches and memmove calls could be slow.\n\nRegards,\n\n-- \nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 22 Sep 2022 15:26:14 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Sep 22, 2022 at 1:26 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Thu, Sep 22, 2022 at 1:46 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> > While on the subject, I wonder how important it is to keep the chunks\nin the small nodes in sorted order. That adds branches and memmove calls,\nand is the whole reason for the recent \"pg_lfind_ge\" function.\n>\n> Good point. While keeping the chunks in the small nodes in sorted\n> order is useful for visiting all keys in sorted order, additional\n> branches and memmove calls could be slow.\n\nRight, the ordering is a property that some users will need, so best to\nkeep it. 
Although the node128 doesn't have that property -- too slow to do\nso, I think.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Sep 22, 2022 at 1:26 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Thu, Sep 22, 2022 at 1:46 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> > While on the subject, I wonder how important it is to keep the chunks in the small nodes in sorted order. That adds branches and memmove calls, and is the whole reason for the recent \"pg_lfind_ge\" function.>> Good point. While keeping the chunks in the small nodes in sorted> order is useful for visiting all keys in sorted order, additional> branches and memmove calls could be slow.Right, the ordering is a property that some users will need, so best to keep it. Although the node128 doesn't have that property -- too slow to do so, I think.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 22 Sep 2022 19:52:23 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Sep 22, 2022 at 7:52 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n>\n>\n> On Thu, Sep 22, 2022 at 1:26 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> > Good point. While keeping the chunks in the small nodes in sorted\n> > order is useful for visiting all keys in sorted order, additional\n> > branches and memmove calls could be slow.\n>\n> Right, the ordering is a property that some users will need, so best to\nkeep it. Although the node128 doesn't have that property -- too slow to do\nso, I think.\n\nNevermind, I must have been mixing up keys and values there...\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Sep 22, 2022 at 7:52 PM John Naylor <john.naylor@enterprisedb.com> wrote:>>> On Thu, Sep 22, 2022 at 1:26 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > Good point. 
While keeping the chunks in the small nodes in sorted> > order is useful for visiting all keys in sorted order, additional> > branches and memmove calls could be slow.>> Right, the ordering is a property that some users will need, so best to keep it. Although the node128 doesn't have that property -- too slow to do so, I think.Nevermind, I must have been mixing up keys and values there...--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 22 Sep 2022 21:37:58 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Sep 22, 2022 at 11:46 AM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> One thing I want to try soon is storing fewer than 16/32 etc entries, so\nthat the whole node fits comfortably inside a power-of-two allocation. That\nwould allow us to use aset without wasting space for the smaller nodes,\nwhich would be faster and possibly would solve the fragmentation problem\nAndres referred to in\n\n>\nhttps://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.de\n\nWhile calculating node sizes that fit within a power-of-two size, I noticed\nthe current base node is a bit wasteful, taking up 8 bytes. The node kind\nonly has a small number of values, so it doesn't really make sense to use\nan enum here in the struct (in fact, Andres' prototype used a uint8 for\nnode_kind). We could use a bitfield for the count and kind:\n\nuint16 -- kind and count bitfield\nuint8 shift;\nuint8 chunk;\n\nThat's only 4 bytes. 
Plus, if the kind is ever encoded in a pointer tag,\nthe bitfield can just go back to being count only.\n\nHere are the v6 node kinds:\n\nnode4: 8 + 4 +(4) + 4*8 = 48 bytes\nnode16: 8 + 16 + 16*8 = 152\nnode32: 8 + 32 + 32*8 = 296\nnode128: 8 + 256 + 128/8 + 128*8 = 1304\nnode256: 8 + 256/8 + 256*8 = 2088\n\nAnd here are the possible ways we could optimize nodes for space using aset\nallocation. Parentheses are padding bytes. Even if my math has mistakes,\nthe numbers shouldn't be too far off:\n\nnode3: 4 + 3 +(1) + 3*8 = 32 bytes\nnode6: 4 + 6 +(6) + 6*8 = 64\nnode13: 4 + 13 +(7) + 13*8 = 128\nnode28: 4 + 28 + 28*8 = 256\nnode31: 4 + 256 + 32/8 + 31*8 = 512 (XXX not good)\nnode94: 4 + 256 + 96/8 + 94*8 = 1024\nnode220: 4 + 256 + 224/8 + 220*8 = 2048\nnode256: = 4096\n\nThe main disadvantage is that node256 would balloon in size.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Sep 22, 2022 at 11:46 AM John Naylor <john.naylor@enterprisedb.com> wrote:> One thing I want to try soon is storing fewer than 16/32 etc entries, so that the whole node fits comfortably inside a power-of-two allocation. That would allow us to use aset without wasting space for the smaller nodes, which would be faster and possibly would solve the fragmentation problem Andres referred to in> https://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.deWhile calculating node sizes that fit within a power-of-two size, I noticed the current base node is a bit wasteful, taking up 8 bytes. The node kind only has a small number of values, so it doesn't really make sense to use an enum here in the struct (in fact, Andres' prototype used a uint8 for node_kind). We could use a bitfield for the count and kind:uint16 -- kind and count bitfielduint8\t\tshift;uint8\t\tchunk;That's only 4 bytes. 
Plus, if the kind is ever encoded in a pointer tag, the bitfield can just go back to being count only.Here are the v6 node kinds:node4:   8 +   4 +(4)    +   4*8 =   48 bytesnode16:  8 +  16         +  16*8 =  152node32:  8 +  32         +  32*8 =  296node128: 8 + 256 + 128/8 + 128*8 = 1304node256: 8       + 256/8 + 256*8 = 2088And here are the possible ways we could optimize nodes for space using aset allocation. Parentheses are padding bytes. Even if my math has mistakes, the numbers shouldn't be too far off:node3:   4 +   3 +(1)    +   3*8 =   32 bytesnode6:   4 +   6 +(6)    +   6*8 =   64node13:  4 +  13 +(7)    +  13*8 =  128node28:  4 +  28         +  28*8 =  256node31:  4 + 256 +  32/8 +  31*8 =  512 (XXX not good)node94:  4 + 256 +  96/8 +  94*8 = 1024node220: 4 + 256 + 224/8 + 220*8 = 2048node256:                         = 4096The main disadvantage is that node256 would balloon in size.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 22 Sep 2022 22:11:18 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Sep 23, 2022 at 12:11 AM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Thu, Sep 22, 2022 at 11:46 AM John Naylor <john.naylor@enterprisedb.com> wrote:\n> > One thing I want to try soon is storing fewer than 16/32 etc entries, so that the whole node fits comfortably inside a power-of-two allocation. That would allow us to use aset without wasting space for the smaller nodes, which would be faster and possibly would solve the fragmentation problem Andres referred to in\n>\n> > https://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.de\n>\n> While calculating node sizes that fit within a power-of-two size, I noticed the current base node is a bit wasteful, taking up 8 bytes. 
The node kind only has a small number of values, so it doesn't really make sense to use an enum here in the struct (in fact, Andres' prototype used a uint8 for node_kind). We could use a bitfield for the count and kind:\n>\n> uint16 -- kind and count bitfield\n> uint8 shift;\n> uint8 chunk;\n>\n> That's only 4 bytes. Plus, if the kind is ever encoded in a pointer tag, the bitfield can just go back to being count only.\n\nGood point, agreed.\n\n>\n> Here are the v6 node kinds:\n>\n> node4: 8 + 4 +(4) + 4*8 = 48 bytes\n> node16: 8 + 16 + 16*8 = 152\n> node32: 8 + 32 + 32*8 = 296\n> node128: 8 + 256 + 128/8 + 128*8 = 1304\n> node256: 8 + 256/8 + 256*8 = 2088\n>\n> And here are the possible ways we could optimize nodes for space using aset allocation. Parentheses are padding bytes. Even if my math has mistakes, the numbers shouldn't be too far off:\n>\n> node3: 4 + 3 +(1) + 3*8 = 32 bytes\n> node6: 4 + 6 +(6) + 6*8 = 64\n> node13: 4 + 13 +(7) + 13*8 = 128\n> node28: 4 + 28 + 28*8 = 256\n> node31: 4 + 256 + 32/8 + 31*8 = 512 (XXX not good)\n> node94: 4 + 256 + 96/8 + 94*8 = 1024\n> node220: 4 + 256 + 224/8 + 220*8 = 2048\n> node256: = 4096\n>\n> The main disadvantage is that node256 would balloon in size.\n\nYeah, node31 and node256 are bloated. We probably could use slab for\nnode256 independently. It's worth trying a benchmark to see how it\naffects the performance and the tree size.\n\nBTW We need to consider not only aset/slab but also DSA since we\nallocate dead tuple TIDs on DSM in parallel vacuum cases. 
FYI DSA uses\nthe following size classes:\n\nstatic const uint16 dsa_size_classes[] = {\n sizeof(dsa_area_span), 0, /* special size classes */\n 8, 16, 24, 32, 40, 48, 56, 64, /* 8 classes separated by 8 bytes */\n 80, 96, 112, 128, /* 4 classes separated by 16 bytes */\n 160, 192, 224, 256, /* 4 classes separated by 32 bytes */\n 320, 384, 448, 512, /* 4 classes separated by 64 bytes */\n 640, 768, 896, 1024, /* 4 classes separated by 128 bytes */\n 1280, 1560, 1816, 2048, /* 4 classes separated by ~256 bytes */\n 2616, 3120, 3640, 4096, /* 4 classes separated by ~512 bytes */\n 5456, 6552, 7280, 8192 /* 4 classes separated by ~1024 bytes */\n};\n\nnode256 will be classed as 2616, which is still not good.\n\nAnyway, I'll implement DSA support for radix tree.\n\nRegards,\n\n-- \nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 28 Sep 2022 12:49:07 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Sep 28, 2022 at 10:49 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> BTW We need to consider not only aset/slab but also DSA since we\n> allocate dead tuple TIDs on DSM in parallel vacuum cases. FYI DSA uses\n> the following size classes:\n>\n> static const uint16 dsa_size_classes[] = {\n> [...]\n\nThanks for that info -- I wasn't familiar with the details of DSA. For the\nnon-parallel case, I plan to at least benchmark using aset because I gather\nit's the most heavily optimized. I'm thinking that will allow other problem\nareas to be more prominent. I'll also want to compare total context size\ncompared to slab to see if possibly less fragmentation makes up for other\nwastage.\n\nAlong those lines, one thing I've been thinking about is the number of size\nclasses. 
There is a tradeoff between memory efficiency and number of\nbranches when searching/inserting. My current thinking is there is too much\ncoupling between size class and data type. Each size class currently uses a\ndifferent data type and a different algorithm to search and set it, which\nin turn requires another branch. We've found that a larger number of size\nclasses leads to poor branch prediction [1] and (I imagine) code density.\n\nI'm thinking we can use \"flexible array members\" for the values/pointers,\nand keep the rest of the control data in the struct the same. That way, we\nnever have more than 4 actual \"kinds\" to code and branch on. As a bonus,\nwhen migrating a node to a larger size class of the same kind, we can\nsimply repalloc() to the next size. To show what I mean, consider this new\ntable:\n\nnode2: 5 + 6 +(5)+ 2*8 = 32 bytes\nnode6: 5 + 6 +(5)+ 6*8 = 64\n\nnode12: 5 + 27 + 12*8 = 128\nnode27: 5 + 27 + 27*8 = 248(->256)\n\nnode91: 5 + 256 + 28 +(7)+ 91*8 = 1024\nnode219: 5 + 256 + 28 +(7)+219*8 = 2048\n\nnode256: 5 + 32 +(3)+256*8 = 2088(->4096)\n\nSeven size classes are grouped into the four kinds.\n\nThe common base at the front is here 5 bytes because there is a new uint8\nfield for \"capacity\", which we can ignore for node256 since we assume we\ncan always insert/update that node. The control data is the same in each\npair, and so the offset to the pointer/value array is the same. Thus,\nmigration would look something like:\n\ncase FOO_KIND:\nif (unlikely(count == capacity))\n{\n if (capacity == XYZ) /* for smaller size class of the pair */\n {\n <repalloc to next size class>;\n capacity = next-higher-capacity;\n goto do_insert;\n }\n else\n <migrate data to next node kind>;\n}\nelse\n{\ndo_insert:\n <...>;\n break;\n}\n/* FALLTHROUGH */\n...\n\nOne disadvantage is that this wastes some space by reserving the full set\nof control data in the smaller size class of the pair, but it's usually\nsmall compared to array size. 
Somewhat unrelated, we could still implement\nAndres' idea [1] to dispense with the isset array in inner nodes of the\nindirect array type (now node128), since we can just test if the pointer is\nnull.\n\n[1]\nhttps://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.de\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Sep 28, 2022 at 10:49 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> BTW We need to consider not only aset/slab but also DSA since we> allocate dead tuple TIDs on DSM in parallel vacuum cases. FYI DSA uses> the following size classes:>> static const uint16 dsa_size_classes[] = {> [...]Thanks for that info -- I wasn't familiar with the details of DSA. For the non-parallel case, I plan to at least benchmark using aset because I gather it's the most heavily optimized. I'm thinking that will allow other problem areas to be more prominent. I'll also want to compare total context size compared to slab to see if possibly less fragmentation makes up for other wastage.Along those lines, one thing I've been thinking about is the number of size classes. There is a tradeoff between memory efficiency and number of branches when searching/inserting. My current thinking is there is too much coupling between size class and data type. Each size class currently uses a different data type and a different algorithm to search and set it, which in turn requires another branch. We've found that a larger number of size classes leads to poor branch prediction [1] and (I imagine) code density.I'm thinking we can use \"flexible array members\" for the values/pointers, and keep the rest of the control data in the struct the same. That way, we never have more than 4 actual \"kinds\" to code and branch on. As a bonus, when migrating a node to a larger size class of the same kind, we can simply repalloc() to the next size. 
To show what I mean, consider this new table:node2:   5 +  6       +(5)+  2*8 =   32 bytesnode6:   5 +  6       +(5)+  6*8 =   64node12:  5 + 27       +     12*8 =  128node27:  5 + 27       +     27*8 =  248(->256)node91:  5 + 256 + 28 +(7)+ 91*8 = 1024node219: 5 + 256 + 28 +(7)+219*8 = 2048node256: 5 + 32       +(3)+256*8 = 2088(->4096)Seven size classes are grouped into the four kinds.The common base at the front is here 5 bytes because there is a new uint8 field for \"capacity\", which we can ignore for node256 since we assume we can always insert/update that node. The control data is the same in each pair, and so the offset to the pointer/value array is the same. Thus, migration would look something like:case FOO_KIND:if (unlikely(count == capacity)){  if (capacity == XYZ) /* for smaller size class of the pair */  {    <repalloc to next size class>;    capacity = next-higher-capacity;    goto do_insert;  }  else    <migrate data to next node kind>;}else{do_insert:  <...>;  break;}/* FALLTHROUGH */...One disadvantage is that this wastes some space by reserving the full set of control data in the smaller size class of the pair, but it's usually small compared to array size. 
Somewhat unrelated, we could still implement Andres' idea [1] to dispense with the isset array in inner nodes of the indirect array type (now node128), since we can just test if the pointer is null.[1] https://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.de--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 28 Sep 2022 13:18:35 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Sep 28, 2022 at 1:18 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> [stuff about size classes]\n\nI kind of buried the lede here on one thing: If we only have 4 kinds\nregardless of the number of size classes, we can use 2 bits of the pointer\nfor dispatch, which would only require 4-byte alignment. That should make\nthat technique more portable.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Sep 28, 2022 at 1:18 PM John Naylor <john.naylor@enterprisedb.com> wrote:> [stuff about size classes]I kind of buried the lede here on one thing: If we only have 4 kinds regardless of the number of size classes, we can use 2 bits of the pointer for dispatch, which would only require 4-byte alignment. That should make that technique more portable.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 28 Sep 2022 14:49:43 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Sep 28, 2022 at 3:18 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Wed, Sep 28, 2022 at 10:49 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > BTW We need to consider not only aset/slab but also DSA since we\n> > allocate dead tuple TIDs on DSM in parallel vacuum cases. 
FYI DSA uses\n> > the following size classes:\n> >\n> > static const uint16 dsa_size_classes[] = {\n> > [...]\n>\n> Thanks for that info -- I wasn't familiar with the details of DSA. For the non-parallel case, I plan to at least benchmark using aset because I gather it's the most heavily optimized. I'm thinking that will allow other problem areas to be more prominent. I'll also want to compare total context size compared to slab to see if possibly less fragmentation makes up for other wastage.\n\nThanks!\n\n>\n> Along those lines, one thing I've been thinking about is the number of size classes. There is a tradeoff between memory efficiency and number of branches when searching/inserting. My current thinking is there is too much coupling between size class and data type. Each size class currently uses a different data type and a different algorithm to search and set it, which in turn requires another branch. We've found that a larger number of size classes leads to poor branch prediction [1] and (I imagine) code density.\n>\n> I'm thinking we can use \"flexible array members\" for the values/pointers, and keep the rest of the control data in the struct the same. That way, we never have more than 4 actual \"kinds\" to code and branch on. As a bonus, when migrating a node to a larger size class of the same kind, we can simply repalloc() to the next size.\n\nInteresting idea. Using flexible array members for values would be\ngood also for the case in the future where we want to support other\nvalue types than uint64.\n\nWith this idea, we can just repalloc() to grow to the larger size in a\npair but I'm slightly concerned that the more size class we use, the\nmore frequent the node needs to grow. 
If we want to support node\nshrink, the deletion is also affected.\n\n> To show what I mean, consider this new table:\n>\n> node2: 5 + 6 +(5)+ 2*8 = 32 bytes\n> node6: 5 + 6 +(5)+ 6*8 = 64\n>\n> node12: 5 + 27 + 12*8 = 128\n> node27: 5 + 27 + 27*8 = 248(->256)\n>\n> node91: 5 + 256 + 28 +(7)+ 91*8 = 1024\n> node219: 5 + 256 + 28 +(7)+219*8 = 2048\n>\n> node256: 5 + 32 +(3)+256*8 = 2088(->4096)\n>\n> Seven size classes are grouped into the four kinds.\n>\n> The common base at the front is here 5 bytes because there is a new uint8 field for \"capacity\", which we can ignore for node256 since we assume we can always insert/update that node. The control data is the same in each pair, and so the offset to the pointer/value array is the same. Thus, migration would look something like:\n\nI think we can use a bitfield for capacity. That way, we can pack\ncount (9bits), kind (2bits)and capacity (4bits) in uint16.\n\n> Somewhat unrelated, we could still implement Andres' idea [1] to dispense with the isset array in inner nodes of the indirect array type (now node128), since we can just test if the pointer is null.\n\nRight. I didn't do that to use the common logic for inner node128 and\nleaf node128.\n\nRegards,\n\n-- \nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Sat, 1 Oct 2022 00:46:54 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2022-09-16 15:00:31 +0900, Masahiko Sawada wrote:\n> I've updated the radix tree patch. 
It's now separated into two patches.\n\ncfbot notices a compiler warning:\nhttps://cirrus-ci.com/task/6247907681632256?logs=gcc_warning#L446\n\n[11:03:05.343] radixtree.c: In function ‘rt_iterate_next’:\n[11:03:05.343] radixtree.c:1758:15: error: ‘slot’ may be used uninitialized in this function [-Werror=maybe-uninitialized]\n[11:03:05.343] 1758 | *value_p = *((uint64 *) slot);\n[11:03:05.343] | ^~~~~~~~~~~~~~~~~~\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Sun, 2 Oct 2022 10:04:24 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Oct 3, 2022 at 2:04 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2022-09-16 15:00:31 +0900, Masahiko Sawada wrote:\n> > I've updated the radix tree patch. It's now separated into two patches.\n>\n> cfbot notices a compiler warning:\n> https://cirrus-ci.com/task/6247907681632256?logs=gcc_warning#L446\n>\n> [11:03:05.343] radixtree.c: In function ‘rt_iterate_next’:\n> [11:03:05.343] radixtree.c:1758:15: error: ‘slot’ may be used uninitialized in this function [-Werror=maybe-uninitialized]\n> [11:03:05.343] 1758 | *value_p = *((uint64 *) slot);\n> [11:03:05.343] | ^~~~~~~~~~~~~~~~~~\n>\n\nThanks, I'll fix it in the next version patch.\n\nRegards,\n\n-- \nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 3 Oct 2022 11:58:33 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Sep 28, 2022 at 12:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Sep 23, 2022 at 12:11 AM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> > On Thu, Sep 22, 2022 at 11:46 AM John Naylor <john.naylor@enterprisedb.com> wrote:\n> > > One thing I want to try soon 
is storing fewer than 16/32 etc entries, so that the whole node fits comfortably inside a power-of-two allocation. That would allow us to use aset without wasting space for the smaller nodes, which would be faster and possibly would solve the fragmentation problem Andres referred to in\n> >\n> > > https://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.de\n> >\n> > While calculating node sizes that fit within a power-of-two size, I noticed the current base node is a bit wasteful, taking up 8 bytes. The node kind only has a small number of values, so it doesn't really make sense to use an enum here in the struct (in fact, Andres' prototype used a uint8 for node_kind). We could use a bitfield for the count and kind:\n> >\n> > uint16 -- kind and count bitfield\n> > uint8 shift;\n> > uint8 chunk;\n> >\n> > That's only 4 bytes. Plus, if the kind is ever encoded in a pointer tag, the bitfield can just go back to being count only.\n>\n> Good point, agreed.\n>\n> >\n> > Here are the v6 node kinds:\n> >\n> > node4: 8 + 4 +(4) + 4*8 = 48 bytes\n> > node16: 8 + 16 + 16*8 = 152\n> > node32: 8 + 32 + 32*8 = 296\n> > node128: 8 + 256 + 128/8 + 128*8 = 1304\n> > node256: 8 + 256/8 + 256*8 = 2088\n> >\n> > And here are the possible ways we could optimize nodes for space using aset allocation. Parentheses are padding bytes. Even if my math has mistakes, the numbers shouldn't be too far off:\n> >\n> > node3: 4 + 3 +(1) + 3*8 = 32 bytes\n> > node6: 4 + 6 +(6) + 6*8 = 64\n> > node13: 4 + 13 +(7) + 13*8 = 128\n> > node28: 4 + 28 + 28*8 = 256\n> > node31: 4 + 256 + 32/8 + 31*8 = 512 (XXX not good)\n> > node94: 4 + 256 + 96/8 + 94*8 = 1024\n> > node220: 4 + 256 + 224/8 + 220*8 = 2048\n> > node256: = 4096\n> >\n> > The main disadvantage is that node256 would balloon in size.\n>\n> Yeah, node31 and node256 are bloated. We probably could use slab for\n> node256 independently. 
It's worth trying a benchmark to see how it\n> affects the performance and the tree size.\n>\n> BTW We need to consider not only aset/slab but also DSA since we\n> allocate dead tuple TIDs on DSM in parallel vacuum cases. FYI DSA uses\n> the following size classes:\n>\n> static const uint16 dsa_size_classes[] = {\n> sizeof(dsa_area_span), 0, /* special size classes */\n> 8, 16, 24, 32, 40, 48, 56, 64, /* 8 classes separated by 8 bytes */\n> 80, 96, 112, 128, /* 4 classes separated by 16 bytes */\n> 160, 192, 224, 256, /* 4 classes separated by 32 bytes */\n> 320, 384, 448, 512, /* 4 classes separated by 64 bytes */\n> 640, 768, 896, 1024, /* 4 classes separated by 128 bytes */\n> 1280, 1560, 1816, 2048, /* 4 classes separated by ~256 bytes */\n> 2616, 3120, 3640, 4096, /* 4 classes separated by ~512 bytes */\n> 5456, 6552, 7280, 8192 /* 4 classes separated by ~1024 bytes */\n> };\n>\n> node256 will be classed as 2616, which is still not good.\n>\n> Anyway, I'll implement DSA support for radix tree.\n>\n\nRegarding DSA support, IIUC we need to use dsa_pointer in inner nodes\nto point to its child nodes, instead of C pointers (ig, backend-local\naddress). I'm thinking of a straightforward approach as the first\nstep; inner nodes have a union of rt_node* and dsa_pointer and we\nchoose either one based on whether the radix tree is shared or not. We\nallocate and free the shared memory for individual nodes by\ndsa_allocate() and dsa_free(), respectively. Therefore we need to get\na C pointer from dsa_pointer by using dsa_get_address() while\ndescending the tree. 
I'm a bit concerned that calling\ndsa_get_address() for every descent could be performance overhead but\nI'm going to measure it anyway.\n\nRegards,\n\n--\nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 5 Oct 2022 15:45:38 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Oct 5, 2022 at 1:46 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Wed, Sep 28, 2022 at 12:49 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> >\n> > On Fri, Sep 23, 2022 at 12:11 AM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > Yeah, node31 and node256 are bloated. We probably could use slab for\n> > node256 independently. It's worth trying a benchmark to see how it\n> > affects the performance and the tree size.\n\nThis wasn't the focus of your current email, but while experimenting with\nv6 I had another thought about local allocation: If we use the default slab\nblock size of 8192 bytes, then only 3 chunks of size 2088 can fit, right?\nIf so, since aset and DSA also waste at least a few hundred bytes, we could\nstore a useless 256-byte slot array within node256. That way, node128 and\nnode256 share the same start of pointers/values array, so there would be\none less branch for getting that address. In v6, rt_node_get_values and\nrt_node_get_children are not inlined (asde: gcc uses a jump table for 5\nkinds but not for 4), but possibly should be, and the smaller the better.\n\n> Regarding DSA support, IIUC we need to use dsa_pointer in inner nodes\n> to point to its child nodes, instead of C pointers (ig, backend-local\n> address). I'm thinking of a straightforward approach as the first\n> step; inner nodes have a union of rt_node* and dsa_pointer and we\n> choose either one based on whether the radix tree is shared or not. 
We\n> allocate and free the shared memory for individual nodes by\n> dsa_allocate() and dsa_free(), respectively. Therefore we need to get\n> a C pointer from dsa_pointer by using dsa_get_address() while\n> descending the tree. I'm a bit concerned that calling\n> dsa_get_address() for every descent could be performance overhead but\n> I'm going to measure it anyway.\n\nAre dsa pointers aligned the same as pointers to locally allocated memory?\nMeaning, is the offset portion always a multiple of 4 (or 8)? It seems that\nway from a glance, but I can't say for sure. If the lower 2 bits of a DSA\npointer are never set, we can tag them the same way as a regular pointer.\nThat same technique could help hide the latency of converting the pointer,\nby the same way it would hide the latency of loading parts of a node into\nCPU registers.\n\nOne concern is, handling both local and dsa cases in the same code requires\nmore (predictable) branches and reduces code density. That might be a\nreason in favor of templating to handle each case in its own translation\nunit. But that might be overkill.\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Oct 5, 2022 at 1:46 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Wed, Sep 28, 2022 at 12:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> >> > On Fri, Sep 23, 2022 at 12:11 AM John Naylor> > <john.naylor@enterprisedb.com> wrote:> > Yeah, node31 and node256 are bloated.  We probably could use slab for> > node256 independently. It's worth trying a benchmark to see how it> > affects the performance and the tree size.This wasn't the focus of your current email, but while experimenting with v6 I had another thought about local allocation: If we use the default slab block size of 8192 bytes, then only 3 chunks of size 2088 can fit, right? If so, since aset and DSA also waste at least a few hundred bytes, we could store a useless 256-byte slot array within node256. 
That way, node128 and node256 share the same start of pointers/values array, so there would be one less branch for getting that address. In v6, rt_node_get_values and rt_node_get_children are not inlined (asde: gcc uses a jump table for 5 kinds but not for 4), but possibly should be, and the smaller the better. > Regarding DSA support, IIUC we need to use dsa_pointer in inner nodes> to point to its child nodes, instead of C pointers (ig, backend-local> address). I'm thinking of a straightforward approach as the first> step; inner nodes have a union of rt_node* and dsa_pointer and we> choose either one based on whether the radix tree is shared or not. We> allocate and free the shared memory for individual nodes by> dsa_allocate() and dsa_free(), respectively. Therefore we need to get> a C pointer from dsa_pointer by using dsa_get_address() while> descending the tree. I'm a bit concerned that calling> dsa_get_address() for every descent could be performance overhead but> I'm going to measure it anyway.Are dsa pointers aligned the same as pointers to locally allocated memory? Meaning, is the offset portion always a multiple of 4 (or 8)? It seems that way from a glance, but I can't say for sure. If the lower 2 bits of a DSA pointer are never set, we can tag them the same way as a regular pointer. That same technique could help hide the latency of converting the pointer, by the same way it would hide the latency of loading parts of a node into CPU registers.One concern is, handling both local and dsa cases in the same code requires more (predictable) branches and reduces code density. That might be a reason in favor of templating to handle each case in its own translation unit. 
But that might be overkill.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 5 Oct 2022 16:40:31 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Oct 5, 2022 at 6:40 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Wed, Oct 5, 2022 at 1:46 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Sep 28, 2022 at 12:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Fri, Sep 23, 2022 at 12:11 AM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> > > Yeah, node31 and node256 are bloated. We probably could use slab for\n> > > node256 independently. It's worth trying a benchmark to see how it\n> > > affects the performance and the tree size.\n>\n> This wasn't the focus of your current email, but while experimenting with v6 I had another thought about local allocation: If we use the default slab block size of 8192 bytes, then only 3 chunks of size 2088 can fit, right? If so, since aset and DSA also waste at least a few hundred bytes, we could store a useless 256-byte slot array within node256. That way, node128 and node256 share the same start of pointers/values array, so there would be one less branch for getting that address. In v6, rt_node_get_values and rt_node_get_children are not inlined (asde: gcc uses a jump table for 5 kinds but not for 4), but possibly should be, and the smaller the better.\n\nIt would be good for performance but I'm a bit concerned that it's\nhighly optimized to the design of aset and DSA. Since size 2088 will\nbe currently classed as 2616 in DSA, DSA wastes 528 bytes. 
However, if\nwe introduce a new class of 2304 (=2048 + 256) bytes we cannot store a\nuseless 256-byte and the assumption will be broken.\n\n>\n> > Regarding DSA support, IIUC we need to use dsa_pointer in inner nodes\n> > to point to its child nodes, instead of C pointers (ig, backend-local\n> > address). I'm thinking of a straightforward approach as the first\n> > step; inner nodes have a union of rt_node* and dsa_pointer and we\n> > choose either one based on whether the radix tree is shared or not. We\n> > allocate and free the shared memory for individual nodes by\n> > dsa_allocate() and dsa_free(), respectively. Therefore we need to get\n> > a C pointer from dsa_pointer by using dsa_get_address() while\n> > descending the tree. I'm a bit concerned that calling\n> > dsa_get_address() for every descent could be performance overhead but\n> > I'm going to measure it anyway.\n>\n> Are dsa pointers aligned the same as pointers to locally allocated memory? Meaning, is the offset portion always a multiple of 4 (or 8)?\n\nI think so.\n\n> It seems that way from a glance, but I can't say for sure. If the lower 2 bits of a DSA pointer are never set, we can tag them the same way as a regular pointer. That same technique could help hide the latency of converting the pointer, by the same way it would hide the latency of loading parts of a node into CPU registers.\n>\n> One concern is, handling both local and dsa cases in the same code requires more (predictable) branches and reduces code density. That might be a reason in favor of templating to handle each case in its own translation unit.\n\nRight. 
We also need to support locking for shared radix tree, which\nwould require more branches.\n\nRegards,\n\n-- \nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 6 Oct 2022 16:52:26 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Oct 6, 2022 at 2:53 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Wed, Oct 5, 2022 at 6:40 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> >\n> > This wasn't the focus of your current email, but while experimenting\nwith v6 I had another thought about local allocation: If we use the default\nslab block size of 8192 bytes, then only 3 chunks of size 2088 can fit,\nright? If so, since aset and DSA also waste at least a few hundred bytes,\nwe could store a useless 256-byte slot array within node256. That way,\nnode128 and node256 share the same start of pointers/values array, so there\nwould be one less branch for getting that address. In v6,\nrt_node_get_values and rt_node_get_children are not inlined (asde: gcc uses\na jump table for 5 kinds but not for 4), but possibly should be, and the\nsmaller the better.\n>\n> It would be good for performance but I'm a bit concerned that it's\n> highly optimized to the design of aset and DSA. Since size 2088 will\n> be currently classed as 2616 in DSA, DSA wastes 528 bytes. However, if\n> we introduce a new class of 2304 (=2048 + 256) bytes we cannot store a\n> useless 256-byte and the assumption will be broken.\n\nA new DSA class is hypothetical. A better argument against my idea is that\nSLAB_DEFAULT_BLOCK_SIZE is arbitrary. FWIW, I looked at the prototype just\nnow and the slab block sizes are:\n\nMax(pg_nextpower2_32((MAXALIGN(inner_class_info[i].size) + 16) * 32), 1024)\n\n...which would be 128kB for nodemax. 
I'm curious about the difference.\n\n> > One concern is, handling both local and dsa cases in the same code\nrequires more (predictable) branches and reduces code density. That might\nbe a reason in favor of templating to handle each case in its own\ntranslation unit.\n>\n> Right. We also need to support locking for shared radix tree, which\n> would require more branches.\n\nHmm, now it seems we'll likely want to template local vs. shared as a later\nstep...\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Oct 6, 2022 at 2:53 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Wed, Oct 5, 2022 at 6:40 PM John Naylor <john.naylor@enterprisedb.com> wrote:> >> > This wasn't the focus of your current email, but while experimenting with v6 I had another thought about local allocation: If we use the default slab block size of 8192 bytes, then only 3 chunks of size 2088 can fit, right? If so, since aset and DSA also waste at least a few hundred bytes, we could store a useless 256-byte slot array within node256. That way, node128 and node256 share the same start of pointers/values array, so there would be one less branch for getting that address. In v6, rt_node_get_values and rt_node_get_children are not inlined (asde: gcc uses a jump table for 5 kinds but not for 4), but possibly should be, and the smaller the better.>> It would be good for performance but I'm a bit concerned that it's> highly optimized to the design of aset and DSA. Since size 2088 will> be currently classed as 2616 in DSA, DSA wastes 528 bytes. However, if> we introduce a new class of 2304 (=2048 + 256) bytes we cannot store a> useless 256-byte and the assumption will be broken.A new DSA class is hypothetical. A better argument against my idea is that SLAB_DEFAULT_BLOCK_SIZE is arbitrary. FWIW, I looked at the prototype just now and the slab block sizes are:Max(pg_nextpower2_32((MAXALIGN(inner_class_info[i].size) + 16) * 32), 1024)...which would be 128kB for nodemax. 
I'm curious about the difference.> > One concern is, handling both local and dsa cases in the same code requires more (predictable) branches and reduces code density. That might be a reason in favor of templating to handle each case in its own translation unit.>> Right. We also need to support locking for shared radix tree, which> would require more branches.Hmm, now it seems we'll likely want to template local vs. shared as a later step...--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 6 Oct 2022 16:30:52 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Sep 16, 2022 at 1:01 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> In addition to two patches, I've attached the third patch. It's not\n> part of radix tree implementation but introduces a contrib module\n> bench_radix_tree, a tool for radix tree performance benchmarking. It\n> measures loading and lookup performance of both the radix tree and a\n> flat array.\n\nHi Masahiko, I've been using these benchmarks, along with my own\nvariations, to try various things that I've mentioned. I'm long overdue for\nan update, but the picture is not yet complete.\n\nFor now, I have two questions that I can't figure out on my own:\n\n1. There seems to be some non-obvious limit on the number of keys that are\nloaded (or at least what the numbers report). This is independent of the\nnumber of tids per block. 
Example below:\n\njohn=# select * from bench_shuffle_search(0, 8*1000*1000);\nNOTICE: num_keys = 8000000, height = 3, n4 = 0, n16 = 1, n32 = 0, n128 =\n250000, n256 = 981\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 8000000 | 268435456 | 48000000 | 661 |\n 29 | 276 | 389\n\njohn=# select * from bench_shuffle_search(0, 9*1000*1000);\nNOTICE: num_keys = 8388608, height = 3, n4 = 0, n16 = 1, n32 = 0, n128 =\n262144, n256 = 1028\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 8388608 | 276824064 | 54000000 | 718 |\n 33 | 311 | 446\n\nThe array is the right size, but nkeys hasn't kept pace. Can you reproduce\nthis? Attached is the patch I'm using to show the stats when running the\ntest. (Side note: The numbers look unfavorable for radix tree because I'm\nusing 1 tid per block here.)\n\n2. I found that bench_shuffle_search() is much *faster* for traditional\nbinary search on an array than bench_seq_search(). I've found this to be\ntrue in every case. This seems counterintuitive to me -- any idea why this\nis? 
Example:\n\njohn=# select * from bench_seq_search(0, 1000000);\nNOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128\n= 1, n256 = 122\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 10199040 | 180000000 | 168 |\n106 | 827 | 3348\n\njohn=# select * from bench_shuffle_search(0, 1000000);\nNOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128\n= 1, n256 = 122\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 10199040 | 180000000 | 171 |\n107 | 827 | 1400\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 7 Oct 2022 12:29:11 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Oct 7, 2022 at 2:29 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Fri, Sep 16, 2022 at 1:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > In addition to two patches, I've attached the third patch. It's not\n> > part of radix tree implementation but introduces a contrib module\n> > bench_radix_tree, a tool for radix tree performance benchmarking. It\n> > measures loading and lookup performance of both the radix tree and a\n> > flat array.\n>\n> Hi Masahiko, I've been using these benchmarks, along with my own variations, to try various things that I've mentioned. I'm long overdue for an update, but the picture is not yet complete.\n\nThanks!\n\n> For now, I have two questions that I can't figure out on my own:\n>\n> 1. 
There seems to be some non-obvious limit on the number of keys that are loaded (or at least what the numbers report). This is independent of the number of tids per block. Example below:\n>\n> john=# select * from bench_shuffle_search(0, 8*1000*1000);\n> NOTICE: num_keys = 8000000, height = 3, n4 = 0, n16 = 1, n32 = 0, n128 = 250000, n256 = 981\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 8000000 | 268435456 | 48000000 | 661 | 29 | 276 | 389\n>\n> john=# select * from bench_shuffle_search(0, 9*1000*1000);\n> NOTICE: num_keys = 8388608, height = 3, n4 = 0, n16 = 1, n32 = 0, n128 = 262144, n256 = 1028\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 8388608 | 276824064 | 54000000 | 718 | 33 | 311 | 446\n>\n> The array is the right size, but nkeys hasn't kept pace. Can you reproduce this? Attached is the patch I'm using to show the stats when running the test. (Side note: The numbers look unfavorable for radix tree because I'm using 1 tid per block here.)\n\nYes, I can reproduce this. In tid_to_key_off() we need to cast to\nuint64 when packing offset number and block number:\n\n tid_i = ItemPointerGetOffsetNumber(tid);\n tid_i |= ItemPointerGetBlockNumber(tid) << shift;\n\n>\n> 2. I found that bench_shuffle_search() is much *faster* for traditional binary search on an array than bench_seq_search(). I've found this to be true in every case. This seems counterintuitive to me -- any idea why this is? 
Example:\n>\n> john=# select * from bench_seq_search(0, 1000000);\n> NOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 1000000 | 10199040 | 180000000 | 168 | 106 | 827 | 3348\n>\n> john=# select * from bench_shuffle_search(0, 1000000);\n> NOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 1000000 | 10199040 | 180000000 | 171 | 107 | 827 | 1400\n>\n\nUgh, in shuffle_itemptrs(), we shuffled itemptrs instead of itemptr:\n\n for (int i = 0; i < nitems - 1; i++)\n {\n int j = shuffle_randrange(&state, i, nitems - 1);\n ItemPointerData t = itemptrs[j];\n\n itemptrs[j] = itemptrs[i];\n itemptrs[i] = t;\n\nWith the fix, the results on my environment were:\n\npostgres(1:4093192)=# select * from bench_seq_search(0, 10000000);\n2022-10-07 16:57:03.124 JST [4093192] LOG: num_keys = 10000000,\nheight = 3, n4 = 0, n16 = 1, n32 = 312500, n128 = 0, n256 = 1226\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n----------+------------------+---------------------+------------+---------------+--------------+-----------------\n 10000000 | 101826560 | 1800000000 | 846 |\n 486 | 6096 | 21128\n(1 row)\n\nTime: 28975.566 ms (00:28.976)\npostgres(1:4093192)=# select * from bench_shuffle_search(0, 10000000);\n2022-10-07 16:57:37.476 JST [4093192] LOG: num_keys = 10000000,\nheight = 3, n4 = 0, n16 = 1, n32 = 312500, n128 = 0, n256 = 1226\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms 
|\narray_load_ms | rt_search_ms | array_serach_ms\n----------+------------------+---------------------+------------+---------------+--------------+-----------------\n 10000000 | 101826560 | 1800000000 | 845 |\n 484 | 32700 | 152583\n(1 row)\n\nI've attached a patch to fix them. Also, I realized that bsearch()\ncould be optimized out so I added code to prevent it:\n\nRegards,\n\n-- \nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Fri, 7 Oct 2022 17:08:35 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "The following is not quite a full review, but has plenty to think about.\nThere is too much to cover at once, and I have to start somewhere...\n\nMy main concerns are that internal APIs:\n\n1. are difficult to follow\n2. lead to poor branch prediction and too many function calls\n\nSome of the measurements are picking on the SIMD search code, but I go into\ndetails in order to demonstrate how a regression there can go completely\nunnoticed. Hopefully the broader themes are informative.\n\nOn Fri, Oct 7, 2022 at 3:09 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> [fixed benchmarks]\n\nThanks for that! Now I can show clear results on some aspects in a simple\nway. The attached patches (apply on top of v6) are not intended to be\nincorporated as-is quite yet, but do point the way to some reorganization\nthat I think is necessary. I've done some testing on loading, but will\nleave it out for now in the interest of length.\n\n\n0001-0003 are your performance test fix and and some small conveniences for\ntesting. Binary search is turned off, for example, because we know it\nalready. And the sleep call is so I can run perf in a different shell\nsession, on only the search portion.\n\nNote the v6 test loads all block numbers in the range. 
Since the test item\nids are all below 64 (reasonable), there are always 32 leaf chunks, so all\nthe leaves are node32 and completely full. This had the effect of never\ntaking the byte-wise loop in the proposed pg_lsearch function. These two\naspects make this an easy case for the branch predictor:\n\njohn=# select * from bench_seq_search(0, 1*1000*1000);\nNOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128\n= 1, n256 = 122\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 10199040 | 180000000 | 167 |\n 0 | 822 | 0\n\n 1,470,141,841 branches:u\n\n 63,693 branch-misses:u # 0.00% of all\nbranches\n\njohn=# select * from bench_shuffle_search(0, 1*1000*1000);\nNOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128\n= 1, n256 = 122\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 10199040 | 180000000 | 168 |\n 0 | 2174 | 0\n\n 1,470,142,569 branches:u\n\n 15,023,983 branch-misses:u # 1.02% of all branches\n\n\n0004 randomizes block selection in the load part of the search test so that\neach block has a 50% chance of being loaded. Note that now we have many\nnode16s where we had none before. Although node 16 and node32 appear to\nshare the same path in the switch statement of rt_node_search(), the chunk\ncomparison and node_get_values() calls each must go through different\nbranches. The shuffle case is most affected, but even the sequential case\nslows down. 
(The leaves are less full -> there are more of them, so memory\nuse is larger, but it shouldn't matter much, in the sequential case at\nleast)\n\njohn=# select * from bench_seq_search(0, 2*1000*1000);\nNOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889,\nn128 = 1, n256 = 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 14893056 | 179937720 | 173 |\n0 | 907 | 0\n\n 1,684,114,926 branches:u\n\n 1,989,901 branch-misses:u # 0.12% of all branches\n\njohn=# select * from bench_shuffle_search(0, 2*1000*1000);\nNOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889,\nn128 = 1, n256 = 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 14893056 | 179937720 | 173 |\n0 | 2890 | 0\n\n 1,684,115,844 branches:u\n\n 34,215,740 branch-misses:u # 2.03% of all branches\n\n\n0005 replaces pg_lsearch with a branch-free SIMD search. Note that it\nretains full portability and gains predictable performance. For\ndemonstration, it's used on all three linear-search types. 
Although I'm\nsure it'd be way too slow for node4, this benchmark hardly has any so it's\nok.\n\njohn=# select * from bench_seq_search(0, 2*1000*1000);\nNOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889,\nn128 = 1, n256 = 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 14893056 | 179937720 | 176 |\n0 | 867 | 0\n\n 1,469,540,357 branches:u\n\n 96,678 branch-misses:u # 0.01% of all\nbranches\n\njohn=# select * from bench_shuffle_search(0, 2*1000*1000);\nNOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889,\nn128 = 1, n256 = 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 14893056 | 179937720 | 171 |\n0 | 2530 | 0\n\n 1,469,540,533 branches:u\n\n 15,019,975 branch-misses:u # 1.02% of all branches\n\n\n0006 removes node16, and 0007 avoids a function call to introspect node\ntype. 0006 is really to make 0007 simpler to code. The crucial point here\nis that calling out to rt_node_get_values/children() to figure out what\ntype we are is costly. With these patches, searching an unevenly populated\nload is the same or faster than the original sequential load, despite\ntaking twice as much memory. 
(And, as I've noted before, decoupling size\nclass from node kind would win the memory back.)\n\njohn=# select * from bench_seq_search(0, 2*1000*1000);\nNOTICE: num_keys = 999654, height = 2, n4 = 1, n32 = 62499, n128 = 1, n256\n= 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 20381696 | 179937720 | 171 |\n0 | 717 | 0\n\n 1,349,614,294 branches:u\n\n 1,313 branch-misses:u # 0.00% of all\nbranches\n\njohn=# select * from bench_shuffle_search(0, 2*1000*1000);\nNOTICE: num_keys = 999654, height = 2, n4 = 1, n32 = 62499, n128 = 1, n256\n= 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 20381696 | 179937720 | 172 |\n0 | 2202 | 0\n\n 1,349,614,741 branches:u\n\n 30,592 branch-misses:u # 0.00% of all\nbranches\n\nExpanding this point, once a path branches based on node kind, there should\nbe no reason to ever forget the kind. Ther abstractions in v6 have\ndisadvantages. I understand the reasoning -- to reduce duplication of code.\nHowever, done this way, less code in the text editor leads to *more* code\n(i.e. costly function calls and branches) on the machine level.\n\nI haven't looked at insert/load performance carefully, but it's clear it\nsuffers from the same amnesia. prepare_node_for_insert() branches based on\nthe kind. If it must call rt_node_grow(), that function has no idea where\nit came from and must branch again. When prepare_node_for_insert() returns\nwe again have no idea what the kind is, so must branch again. 
And if we are\none of the three linear-search nodes, we later do another function call,\nwhere we encounter a 5-way jump table because the caller could be anything\nat all.\n\nSome of this could be worked around with always-inline functions to which\nwe pass a const node kind, and let the compiler get rid of the branches\netc. But many cases are probably not even worth doing that. For example, I\ndon't think prepare_node_for_insert() is a useful abstraction to begin\nwith. It returns an index, but only for linear nodes. Lookup nodes get a\nreturn value of zero. There is not enough commonality here.\n\nAlong the same lines, there are a number of places that have branches as a\nconsequence of treating inner nodes and leaves with the same api:\n\nrt_node_iterate_next\nchunk_array_node_get_slot\nnode_128/256_get_slot\nrt_node_search\n\nI'm leaning towards splitting these out into specialized functions for each\ninner and leaf. This is a bit painful for the last one, but perhaps if we\nare resigned to templating the shared-mem case, maybe we can template some\nof the inner/leaf stuff. Something to think about for later, but for now I\nbelieve we have to accept some code duplication as a prerequisite for\ndecent performance as well as readability.\n\nFor the next steps, we need to proceed cautiously because there is a lot in\nthe air at the moment. Here are some aspects I would find desirable. If\nthere are impracticalities I haven't thought of, we can discuss further. I\ndon't pretend to know the practical consequences of every change I mention.\n\n- If you have started coding the shared memory case, I'd advise to continue\nso we can see what that looks like. If that has not gotten beyond the\ndesign stage, I'd like to first see an attempt at tearing down some of the\nclumsier abstractions in the current patch.\n- As a \"smoke test\", there should ideally be nothing as general as\nrt_node_get_children/values(). 
We should ideally always know what kind we\nare if we found out earlier.\n- For distinguishing between linear nodes, perhaps some always-inline\nfunctions can help hide details. But at the same time, trying to treat them\nthe same is not always worthwhile.\n- Start to separate treatment of inner/leaves and see how it goes.\n- I firmly believe we only need 4 node *kinds*, and later we can decouple\nthe size classes as a separate concept. I'm willing to put serious time\ninto that once the broad details are right. I will also investigate pointer\ntagging if we can confirm that can work similarly for dsa pointers.\n\nRegarding size class decoupling, I'll respond to a point made earlier:\n\nOn Fri, Sep 30, 2022 at 10:47 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> With this idea, we can just repalloc() to grow to the larger size in a\n> pair but I'm slightly concerned that the more size class we use, the\n> more frequent the node needs to grow.\n\nWell, yes, but that's orthogonal. For example, v6 has 5 node kinds. Imagine\nthat we have 4 node kinds, but the SIMD node kind used 2 size classes. Then\nthe nodes would grow at *exactly* the same frequency as they do today. I\nlisted many ways a size class could fit into a power-of-two (and there are\nmore), but we have a choice in how many to actually use. It's a trade off\nbetween memory usage and complexity.\n\n> If we want to support node\n> shrink, the deletion is also affected.\n\nNot necessarily. We don't have to shrink at the same granularity as\ngrowing. My evidence is simple: we don't shrink at all now. :-)\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nThe following is not quite a full review, but has plenty to think about. There is too much to cover at once, and I have to start somewhere...My main concerns are that internal APIs:1. are difficult to follow2. 
lead to poor branch prediction and too many function callsSome of the measurements are picking on the SIMD search code, but I go into details in order to demonstrate how a regression there can go completely unnoticed. Hopefully the broader themes are informative.On Fri, Oct 7, 2022 at 3:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> [fixed benchmarks]Thanks for that! Now I can show clear results on some aspects in a simple way. The attached patches (apply on top of v6) are not intended to be incorporated as-is quite yet, but do point the way to some reorganization that I think is necessary. I've done some testing on loading, but will leave it out for now in the interest of length.0001-0003 are your performance test fix and and some small conveniences for testing. Binary search is turned off, for example, because we know it already. And the sleep call is so I can run perf in a different shell session, on only the search portion.Note the v6 test loads all block numbers in the range. Since the test item ids are all below 64 (reasonable), there are always 32 leaf chunks, so all the leaves are node32 and completely full. This had the effect of never taking the byte-wise loop in the proposed pg_lsearch function. These two aspects make this an easy case for the branch predictor:john=# select * from bench_seq_search(0, 1*1000*1000);NOTICE:  num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122NOTICE:  sleeping for 2 seconds...  
nkeys  | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms ---------+------------------+---------------------+------------+---------------+--------------+----------------- 1000000 |         10199040 |           180000000 |        167 |             0 |          822 |               0     1,470,141,841      branches:u                                                              63,693      branch-misses:u           #    0.00% of all branches   john=# select * from bench_shuffle_search(0, 1*1000*1000);NOTICE:  num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122NOTICE:  sleeping for 2 seconds...  nkeys  | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms ---------+------------------+---------------------+------------+---------------+--------------+----------------- 1000000 |         10199040 |           180000000 |        168 |             0 |         2174 |               0     1,470,142,569      branches:u                                                          15,023,983      branch-misses:u           #    1.02% of all branches0004 randomizes block selection in the load part of the search test so that each block has a 50% chance of being loaded.  Note that now we have many node16s where we had none before. Although node 16 and node32 appear to share the same path in the switch statement of rt_node_search(), the chunk comparison and node_get_values() calls each must go through different branches. The shuffle case is most affected, but even the sequential case slows down. (The leaves are less full -> there are more of them, so memory use is larger, but it shouldn't matter much, in the sequential case at least)john=# select * from bench_seq_search(0, 2*1000*1000);NOTICE:  num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245NOTICE:  sleeping for 2 seconds... 
nkeys  | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms --------+------------------+---------------------+------------+---------------+--------------+----------------- 999654 |         14893056 |           179937720 |        173 |             0 |          907 |               0     1,684,114,926      branches:u                                                           1,989,901      branch-misses:u           #    0.12% of all branchesjohn=# select * from bench_shuffle_search(0, 2*1000*1000);NOTICE:  num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245NOTICE:  sleeping for 2 seconds... nkeys  | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms --------+------------------+---------------------+------------+---------------+--------------+----------------- 999654 |         14893056 |           179937720 |        173 |             0 |         2890 |               0     1,684,115,844      branches:u                                                          34,215,740      branch-misses:u           #    2.03% of all branches0005 replaces pg_lsearch with a branch-free SIMD search. Note that it retains full portability and gains predictable performance. For demonstration, it's used on all three linear-search types. Although I'm sure it'd be way too slow for node4, this benchmark hardly has any so it's ok.john=# select * from bench_seq_search(0, 2*1000*1000);NOTICE:  num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245NOTICE:  sleeping for 2 seconds... 
nkeys  | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms --------+------------------+---------------------+------------+---------------+--------------+----------------- 999654 |         14893056 |           179937720 |        176 |             0 |          867 |               0     1,469,540,357      branches:u                                                              96,678      branch-misses:u           #    0.01% of all branches   john=# select * from bench_shuffle_search(0, 2*1000*1000);NOTICE:  num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245NOTICE:  sleeping for 2 seconds... nkeys  | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms --------+------------------+---------------------+------------+---------------+--------------+----------------- 999654 |         14893056 |           179937720 |        171 |             0 |         2530 |               0     1,469,540,533      branches:u                                                          15,019,975      branch-misses:u           #    1.02% of all branches0006 removes node16, and 0007 avoids a function call to introspect node type. 0006 is really to make 0007 simpler to code. The crucial point here is that calling out to rt_node_get_values/children() to figure out what type we are is costly. With these patches, searching an unevenly populated load is the same or faster than the original sequential load, despite taking twice as much memory. (And, as I've noted before, decoupling size class from node kind would win the memory back.)john=# select * from bench_seq_search(0, 2*1000*1000);NOTICE:  num_keys = 999654, height = 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245NOTICE:  sleeping for 2 seconds... 
nkeys  | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms --------+------------------+---------------------+------------+---------------+--------------+----------------- 999654 |         20381696 |           179937720 |        171 |             0 |          717 |               0     1,349,614,294      branches:u                                                               1,313      branch-misses:u           #    0.00% of all branches   john=# select * from bench_shuffle_search(0, 2*1000*1000);NOTICE:  num_keys = 999654, height = 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245NOTICE:  sleeping for 2 seconds... nkeys  | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms --------+------------------+---------------------+------------+---------------+--------------+----------------- 999654 |         20381696 |           179937720 |        172 |             0 |         2202 |               0     1,349,614,741      branches:u                                                              30,592      branch-misses:u           #    0.00% of all branches  Expanding this point, once a path branches based on node kind, there should be no reason to ever forget the kind. Ther abstractions in v6 have disadvantages. I understand the reasoning -- to reduce duplication of code. However, done this way, less code in the text editor leads to *more* code (i.e. costly function calls and branches) on the machine level.I haven't looked at insert/load performance carefully, but it's clear it suffers from the same amnesia. prepare_node_for_insert() branches based on the kind. If it must call rt_node_grow(), that function has no idea where it came from and must branch again. When prepare_node_for_insert() returns we again have no idea what the kind is, so must branch again. 
And if we are one of the three linear-search nodes, we later do another function call, where we encounter a 5-way jump table because the caller could be anything at all.Some of this could be worked around with always-inline functions to which we pass a const node kind, and let the compiler get rid of the branches etc. But many cases are probably not even worth doing that. For example, I don't think prepare_node_for_insert() is a useful abstraction to begin with. It returns an index, but only for linear nodes. Lookup nodes get a return value of zero. There is not enough commonality here.Along the same lines, there are a number of places that have branches as a consequence of treating inner nodes and leaves with the same api:rt_node_iterate_nextchunk_array_node_get_slotnode_128/256_get_slotrt_node_searchI'm leaning towards splitting these out into specialized functions for each inner and leaf. This is a bit painful for the last one, but perhaps if we are resigned to templating the shared-mem case, maybe we can template some of the inner/leaf stuff. Something to think about for later, but for now I believe we have to accept some code duplication as a prerequisite for decent performance as well as readability.For the next steps, we need to proceed cautiously because there is a lot in the air at the moment. Here are some aspects I would find desirable. If there are impracticalities I haven't thought of, we can discuss further. I don't pretend to know the practical consequences of every change I mention.- If you have started coding the shared memory case, I'd advise to continue so we can see what that looks like. If that has not gotten beyond the design stage, I'd like to first see an attempt at tearing down some of the clumsier abstractions in the current patch.- As a \"smoke test\", there should ideally be nothing as general as rt_node_get_children/values(). 
We should ideally always know what kind we are if we found out earlier.- For distinguishing between linear nodes, perhaps some always-inline functions can help hide details. But at the same time, trying to treat them the same is not always worthwhile.- Start to separate treatment of inner/leaves and see how it goes.- I firmly believe we only need 4 node *kinds*, and later we can decouple the size classes as a separate concept. I'm willing to put serious time into that once the broad details are right. I will also investigate pointer tagging if we can confirm that can work similarly for dsa pointers.Regarding size class decoupling, I'll respond to a point made earlier:On Fri, Sep 30, 2022 at 10:47 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> With this idea, we can just repalloc() to grow to the larger size in a> pair but I'm slightly concerned that the more size class we use, the> more frequent the node needs to grow.Well, yes, but that's orthogonal. For example, v6 has 5 node kinds. Imagine that we have 4 node kinds, but the SIMD node kind used 2 size classes. Then the nodes would grow at *exactly* the same frequency as they do today. I listed many ways a size class could fit into a power-of-two (and there are more), but we have a choice in how many to actually use. It's a trade off between memory usage and complexity.> If we want to support node> shrink, the deletion is also affected.Not necessarily. We don't have to shrink at the same granularity as growing. My evidence is simple: we don't shrink at all now. :-)--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 10 Oct 2022 12:16:30 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Oct 10, 2022 at 12:16 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> Thanks for that! Now I can show clear results on some aspects in a simple\nway. 
The attached patches (apply on top of v6)\n\nForgot the patchset...\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 10 Oct 2022 12:54:50 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn Mon, Oct 10, 2022 at 2:16 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> The following is not quite a full review, but has plenty to think about. There is too much to cover at once, and I have to start somewhere...\n>\n> My main concerns are that internal APIs:\n>\n> 1. are difficult to follow\n> 2. lead to poor branch prediction and too many function calls\n>\n> Some of the measurements are picking on the SIMD search code, but I go into details in order to demonstrate how a regression there can go completely unnoticed. Hopefully the broader themes are informative.\n>\n> On Fri, Oct 7, 2022 at 3:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > [fixed benchmarks]\n>\n> Thanks for that! Now I can show clear results on some aspects in a simple way. The attached patches (apply on top of v6) are not intended to be incorporated as-is quite yet, but do point the way to some reorganization that I think is necessary. I've done some testing on loading, but will leave it out for now in the interest of length.\n>\n>\n> 0001-0003 are your performance test fix and and some small conveniences for testing. Binary search is turned off, for example, because we know it already. And the sleep call is so I can run perf in a different shell session, on only the search portion.\n>\n> Note the v6 test loads all block numbers in the range. Since the test item ids are all below 64 (reasonable), there are always 32 leaf chunks, so all the leaves are node32 and completely full. This had the effect of never taking the byte-wise loop in the proposed pg_lsearch function. 
These two aspects make this an easy case for the branch predictor:\n>\n> john=# select * from bench_seq_search(0, 1*1000*1000);\n> NOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 1000000 | 10199040 | 180000000 | 167 | 0 | 822 | 0\n>\n> 1,470,141,841 branches:u\n> 63,693 branch-misses:u # 0.00% of all branches\n>\n> john=# select * from bench_shuffle_search(0, 1*1000*1000);\n> NOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 1000000 | 10199040 | 180000000 | 168 | 0 | 2174 | 0\n>\n> 1,470,142,569 branches:u\n> 15,023,983 branch-misses:u # 1.02% of all branches\n>\n>\n> 0004 randomizes block selection in the load part of the search test so that each block has a 50% chance of being loaded. Note that now we have many node16s where we had none before. Although node 16 and node32 appear to share the same path in the switch statement of rt_node_search(), the chunk comparison and node_get_values() calls each must go through different branches. The shuffle case is most affected, but even the sequential case slows down. 
(The leaves are less full -> there are more of them, so memory use is larger, but it shouldn't matter much, in the sequential case at least)\n>\n> john=# select * from bench_seq_search(0, 2*1000*1000);\n> NOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 14893056 | 179937720 | 173 | 0 | 907 | 0\n>\n> 1,684,114,926 branches:u\n> 1,989,901 branch-misses:u # 0.12% of all branches\n>\n> john=# select * from bench_shuffle_search(0, 2*1000*1000);\n> NOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 14893056 | 179937720 | 173 | 0 | 2890 | 0\n>\n> 1,684,115,844 branches:u\n> 34,215,740 branch-misses:u # 2.03% of all branches\n>\n>\n> 0005 replaces pg_lsearch with a branch-free SIMD search. Note that it retains full portability and gains predictable performance. For demonstration, it's used on all three linear-search types. 
Although I'm sure it'd be way too slow for node4, this benchmark hardly has any so it's ok.\n>\n> john=# select * from bench_seq_search(0, 2*1000*1000);\n> NOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 14893056 | 179937720 | 176 | 0 | 867 | 0\n>\n> 1,469,540,357 branches:u\n> 96,678 branch-misses:u # 0.01% of all branches\n>\n> john=# select * from bench_shuffle_search(0, 2*1000*1000);\n> NOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 14893056 | 179937720 | 171 | 0 | 2530 | 0\n>\n> 1,469,540,533 branches:u\n> 15,019,975 branch-misses:u # 1.02% of all branches\n>\n>\n> 0006 removes node16, and 0007 avoids a function call to introspect node type. 0006 is really to make 0007 simpler to code. The crucial point here is that calling out to rt_node_get_values/children() to figure out what type we are is costly. With these patches, searching an unevenly populated load is the same or faster than the original sequential load, despite taking twice as much memory. 
(And, as I've noted before, decoupling size class from node kind would win the memory back.)\n>\n> john=# select * from bench_seq_search(0, 2*1000*1000);\n> NOTICE: num_keys = 999654, height = 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 20381696 | 179937720 | 171 | 0 | 717 | 0\n>\n> 1,349,614,294 branches:u\n> 1,313 branch-misses:u # 0.00% of all branches\n>\n> john=# select * from bench_shuffle_search(0, 2*1000*1000);\n> NOTICE: num_keys = 999654, height = 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 20381696 | 179937720 | 172 | 0 | 2202 | 0\n>\n> 1,349,614,741 branches:u\n> 30,592 branch-misses:u # 0.00% of all branches\n>\n> Expanding this point, once a path branches based on node kind, there should be no reason to ever forget the kind. Ther abstractions in v6 have disadvantages. I understand the reasoning -- to reduce duplication of code. However, done this way, less code in the text editor leads to *more* code (i.e. costly function calls and branches) on the machine level.\n\nRight. When updating the patch from v4 to v5, I've eliminated the\nduplication of code between each node type as much as possible, which\nin turn produced more code on the machine level. The resulst of your\nexperiment clearly showed the bad side of this work. 
FWIW I've also\nconfirmed your changes in my environment (I've added the third\nargument to turn on and off the randomizes block selection proposed in\n0004 patch):\n\n* w/o patches\npostgres(1:361692)=# select * from bench_seq_search(0, 1 * 1000 * 1000, false);\n2022-10-14 11:33:15.460 JST [361692] LOG: num_keys = 1000000, height\n= 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 10199040 | 180000000 | 87 |\n | 462 |\n(1 row)\n\n1590104944 branches:u # 3.430 G/sec\n 65957 branch-misses:u # 0.00% of all branches\n\npostgres(1:361692)=# select * from bench_seq_search(0, 2 * 1000 * 1000, true);\n2022-10-14 11:33:28.934 JST [361692] LOG: num_keys = 999654, height =\n2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 14893056 | 179937720 | 91 |\n | 497 |\n(1 row)\n\n1748249456 branches:u # 3.506 G/sec\n 481074 branch-misses:u # 0.03% of all branches\n\npostgres(1:361692)=# select * from bench_shuffle_search(0, 1 * 1000 *\n1000, false);\n2022-10-14 11:33:38.378 JST [361692] LOG: num_keys = 1000000, height\n= 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 10199040 | 180000000 | 86 |\n | 1290 |\n(1 row)\n\n1590105370 branches:u # 1.231 G/sec\n 
15039443 branch-misses:u # 0.95% of all branches\n\nTime: 4166.346 ms (00:04.166)\npostgres(1:361692)=# select * from bench_shuffle_search(0, 2 * 1000 *\n1000, true);\n2022-10-14 11:33:51.556 JST [361692] LOG: num_keys = 999654, height =\n2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 14893056 | 179937720 | 90 |\n | 1536 |\n(1 row)\n\n1748250497 branches:u # 1.137 G/sec\n 28125016 branch-misses:u # 1.61% of all branches\n\n* w/ all patches\npostgres(1:360358)=# select * from bench_seq_search(0, 1 * 1000 * 1000, false);\n2022-10-14 11:29:27.232 JST [360358] LOG: num_keys = 1000000, height\n= 2, n4 = 0, n32 = 31251, n128 = 1, n256 = 122\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 10199040 | 180000000 | 81 |\n | 432 |\n(1 row)\n\n1380062209 branches:u # 3.185 G/sec\n 1066 branch-misses:u # 0.00% of all branches\n\npostgres(1:360358)=# select * from bench_seq_search(0, 2 * 1000 * 1000, true);\n2022-10-14 11:29:46.380 JST [360358] LOG: num_keys = 999654, height =\n2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 20381696 | 179937720 | 88 |\n | 438 |\n(1 row)\n\n1379640815 branches:u # 3.133 G/sec\n 1332 branch-misses:u # 0.00% of all branches\n\npostgres(1:360358)=# select * from bench_shuffle_search(0, 1 * 
1000 *\n1000, false);\n2022-10-14 11:30:00.943 JST [360358] LOG: num_keys = 1000000, height\n= 2, n4 = 0, n32 = 31251, n128 = 1, n256 = 122\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 10199040 | 180000000 | 81 |\n | 994 |\n(1 row)\n\n1380062386 branches:u # 1.386 G/sec\n 18368 branch-misses:u # 0.00% of all branches\n\npostgres(1:360358)=# select * from bench_shuffle_search(0, 2 * 1000 *\n1000, true);\n2022-10-14 11:30:15.944 JST [360358] LOG: num_keys = 999654, height =\n2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\nNOTICE: sleeping for 2 seconds...\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 20381696 | 179937720 | 88 |\n | 1098 |\n(1 row)\n\n1379641503 branches:u # 1.254 G/sec\n 18973 branch-misses:u # 0.00% of all branches\n\n> I haven't looked at insert/load performance carefully, but it's clear it suffers from the same amnesia. prepare_node_for_insert() branches based on the kind. If it must call rt_node_grow(), that function has no idea where it came from and must branch again. When prepare_node_for_insert() returns we again have no idea what the kind is, so must branch again. And if we are one of the three linear-search nodes, we later do another function call, where we encounter a 5-way jump table because the caller could be anything at all.\n>\n> Some of this could be worked around with always-inline functions to which we pass a const node kind, and let the compiler get rid of the branches etc. But many cases are probably not even worth doing that. For example, I don't think prepare_node_for_insert() is a useful abstraction to begin with. 
It returns an index, but only for linear nodes. Lookup nodes get a return value of zero. There is not enough commonality here.\n\nAgreed.\n\n>\n> Along the same lines, there are a number of places that have branches as a consequence of treating inner nodes and leaves with the same api:\n>\n> rt_node_iterate_next\n> chunk_array_node_get_slot\n> node_128/256_get_slot\n> rt_node_search\n>\n> I'm leaning towards splitting these out into specialized functions for each inner and leaf. This is a bit painful for the last one, but perhaps if we are resigned to templating the shared-mem case, maybe we can template some of the inner/leaf stuff. Something to think about for later, but for now I believe we have to accept some code duplication as a prerequisite for decent performance as well as readability.\n\nAgreed.\n\n>\n> For the next steps, we need to proceed cautiously because there is a lot in the air at the moment. Here are some aspects I would find desirable. If there are impracticalities I haven't thought of, we can discuss further. I don't pretend to know the practical consequences of every change I mention.\n>\n> - If you have started coding the shared memory case, I'd advise to continue so we can see what that looks like. If that has not gotten beyond the design stage, I'd like to first see an attempt at tearing down some of the clumsier abstractions in the current patch.\n> - As a \"smoke test\", there should ideally be nothing as general as rt_node_get_children/values(). We should ideally always know what kind we are if we found out earlier.\n> - For distinguishing between linear nodes, perhaps some always-inline functions can help hide details. 
But at the same time, trying to treat them the same is not always worthwhile.\n> - Start to separate treatment of inner/leaves and see how it goes.\n\nSince I've not started coding the shared memory case seriously, I'm\ngoing to start with eliminating abstractions and splitting the\ntreatment of inner and leaf nodes.\n\n> - I firmly believe we only need 4 node *kinds*, and later we can decouple the size classes as a separate concept. I'm willing to put serious time into that once the broad details are right. I will also investigate pointer tagging if we can confirm that can work similarly for dsa pointers.\n\nI'll keep 4 node kinds. And we can later try to introduce classes into\neach node kind.\n\n>\n> Regarding size class decoupling, I'll respond to a point made earlier:\n>\n> On Fri, Sep 30, 2022 at 10:47 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > With this idea, we can just repalloc() to grow to the larger size in a\n> > pair but I'm slightly concerned that the more size class we use, the\n> > more frequent the node needs to grow.\n>\n> Well, yes, but that's orthogonal. For example, v6 has 5 node kinds. Imagine that we have 4 node kinds, but the SIMD node kind used 2 size classes. Then the nodes would grow at *exactly* the same frequency as they do today. I listed many ways a size class could fit into a power-of-two (and there are more), but we have a choice in how many to actually use. 
It's a trade off between memory usage and complexity.\n\nAgreed.\n\nRegards,\n\n-- \nMasahiko Sawada\nPostgreSQL Contributors Team\nRDS Open Source Databases\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 14 Oct 2022 16:12:48 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Oct 14, 2022 at 4:12 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> Hi,\n>\n> On Mon, Oct 10, 2022 at 2:16 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > The following is not quite a full review, but has plenty to think about. There is too much to cover at once, and I have to start somewhere...\n> >\n> > My main concerns are that internal APIs:\n> >\n> > 1. are difficult to follow\n> > 2. lead to poor branch prediction and too many function calls\n> >\n> > Some of the measurements are picking on the SIMD search code, but I go into details in order to demonstrate how a regression there can go completely unnoticed. Hopefully the broader themes are informative.\n> >\n> > On Fri, Oct 7, 2022 at 3:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > [fixed benchmarks]\n> >\n> > Thanks for that! Now I can show clear results on some aspects in a simple way. The attached patches (apply on top of v6) are not intended to be incorporated as-is quite yet, but do point the way to some reorganization that I think is necessary. I've done some testing on loading, but will leave it out for now in the interest of length.\n> >\n> >\n> > 0001-0003 are your performance test fix and and some small conveniences for testing. Binary search is turned off, for example, because we know it already. And the sleep call is so I can run perf in a different shell session, on only the search portion.\n> >\n> > Note the v6 test loads all block numbers in the range. 
Since the test item ids are all below 64 (reasonable), there are always 32 leaf chunks, so all the leaves are node32 and completely full. This had the effect of never taking the byte-wise loop in the proposed pg_lsearch function. These two aspects make this an easy case for the branch predictor:\n> >\n> > john=# select * from bench_seq_search(0, 1*1000*1000);\n> > NOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\n> > NOTICE: sleeping for 2 seconds...\n> > nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> > ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> > 1000000 | 10199040 | 180000000 | 167 | 0 | 822 | 0\n> >\n> > 1,470,141,841 branches:u\n> > 63,693 branch-misses:u # 0.00% of all branches\n> >\n> > john=# select * from bench_shuffle_search(0, 1*1000*1000);\n> > NOTICE: num_keys = 1000000, height = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\n> > NOTICE: sleeping for 2 seconds...\n> > nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> > ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> > 1000000 | 10199040 | 180000000 | 168 | 0 | 2174 | 0\n> >\n> > 1,470,142,569 branches:u\n> > 15,023,983 branch-misses:u # 1.02% of all branches\n> >\n> >\n> > 0004 randomizes block selection in the load part of the search test so that each block has a 50% chance of being loaded. Note that now we have many node16s where we had none before. Although node 16 and node32 appear to share the same path in the switch statement of rt_node_search(), the chunk comparison and node_get_values() calls each must go through different branches. The shuffle case is most affected, but even the sequential case slows down. 
(The leaves are less full -> there are more of them, so memory use is larger, but it shouldn't matter much, in the sequential case at least)\n> >\n> > john=# select * from bench_seq_search(0, 2*1000*1000);\n> > NOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> > NOTICE: sleeping for 2 seconds...\n> > nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> > --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> > 999654 | 14893056 | 179937720 | 173 | 0 | 907 | 0\n> >\n> > 1,684,114,926 branches:u\n> > 1,989,901 branch-misses:u # 0.12% of all branches\n> >\n> > john=# select * from bench_shuffle_search(0, 2*1000*1000);\n> > NOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> > NOTICE: sleeping for 2 seconds...\n> > nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> > --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> > 999654 | 14893056 | 179937720 | 173 | 0 | 2890 | 0\n> >\n> > 1,684,115,844 branches:u\n> > 34,215,740 branch-misses:u # 2.03% of all branches\n> >\n> >\n> > 0005 replaces pg_lsearch with a branch-free SIMD search. Note that it retains full portability and gains predictable performance. For demonstration, it's used on all three linear-search types. 
Although I'm sure it'd be way too slow for node4, this benchmark hardly has any so it's ok.\n> >\n> > john=# select * from bench_seq_search(0, 2*1000*1000);\n> > NOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> > NOTICE: sleeping for 2 seconds...\n> > nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> > --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> > 999654 | 14893056 | 179937720 | 176 | 0 | 867 | 0\n> >\n> > 1,469,540,357 branches:u\n> > 96,678 branch-misses:u # 0.01% of all branches\n> >\n> > john=# select * from bench_shuffle_search(0, 2*1000*1000);\n> > NOTICE: num_keys = 999654, height = 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> > NOTICE: sleeping for 2 seconds...\n> > nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> > --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> > 999654 | 14893056 | 179937720 | 171 | 0 | 2530 | 0\n> >\n> > 1,469,540,533 branches:u\n> > 15,019,975 branch-misses:u # 1.02% of all branches\n> >\n> >\n> > 0006 removes node16, and 0007 avoids a function call to introspect node type. 0006 is really to make 0007 simpler to code. The crucial point here is that calling out to rt_node_get_values/children() to figure out what type we are is costly. With these patches, searching an unevenly populated load is the same or faster than the original sequential load, despite taking twice as much memory. 
(And, as I've noted before, decoupling size class from node kind would win the memory back.)\n> >\n> > john=# select * from bench_seq_search(0, 2*1000*1000);\n> > NOTICE: num_keys = 999654, height = 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\n> > NOTICE: sleeping for 2 seconds...\n> > nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> > --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> > 999654 | 20381696 | 179937720 | 171 | 0 | 717 | 0\n> >\n> > 1,349,614,294 branches:u\n> > 1,313 branch-misses:u # 0.00% of all branches\n> >\n> > john=# select * from bench_shuffle_search(0, 2*1000*1000);\n> > NOTICE: num_keys = 999654, height = 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\n> > NOTICE: sleeping for 2 seconds...\n> > nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms | array_load_ms | rt_search_ms | array_serach_ms\n> > --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> > 999654 | 20381696 | 179937720 | 172 | 0 | 2202 | 0\n> >\n> > 1,349,614,741 branches:u\n> > 30,592 branch-misses:u # 0.00% of all branches\n> >\n> > Expanding this point, once a path branches based on node kind, there should be no reason to ever forget the kind. Ther abstractions in v6 have disadvantages. I understand the reasoning -- to reduce duplication of code. However, done this way, less code in the text editor leads to *more* code (i.e. costly function calls and branches) on the machine level.\n>\n> Right. When updating the patch from v4 to v5, I've eliminated the\n> duplication of code between each node type as much as possible, which\n> in turn produced more code on the machine level. The resulst of your\n> experiment clearly showed the bad side of this work. 
FWIW I've also\n> confirmed your changes in my environment (I've added the third\n> argument to turn on and off the randomizes block selection proposed in\n> 0004 patch):\n>\n> * w/o patches\n> postgres(1:361692)=# select * from bench_seq_search(0, 1 * 1000 * 1000, false);\n> 2022-10-14 11:33:15.460 JST [361692] LOG: num_keys = 1000000, height\n> = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\n> array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 1000000 | 10199040 | 180000000 | 87 |\n> | 462 |\n> (1 row)\n>\n> 1590104944 branches:u # 3.430 G/sec\n> 65957 branch-misses:u # 0.00% of all branches\n>\n> postgres(1:361692)=# select * from bench_seq_search(0, 2 * 1000 * 1000, true);\n> 2022-10-14 11:33:28.934 JST [361692] LOG: num_keys = 999654, height =\n> 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\n> array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 14893056 | 179937720 | 91 |\n> | 497 |\n> (1 row)\n>\n> 1748249456 branches:u # 3.506 G/sec\n> 481074 branch-misses:u # 0.03% of all branches\n>\n> postgres(1:361692)=# select * from bench_shuffle_search(0, 1 * 1000 *\n> 1000, false);\n> 2022-10-14 11:33:38.378 JST [361692] LOG: num_keys = 1000000, height\n> = 2, n4 = 0, n16 = 0, n32 = 31251, n128 = 1, n256 = 122\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\n> array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 1000000 | 10199040 | 180000000 | 86 
|\n> | 1290 |\n> (1 row)\n>\n> 1590105370 branches:u # 1.231 G/sec\n> 15039443 branch-misses:u # 0.95% of all branches\n>\n> Time: 4166.346 ms (00:04.166)\n> postgres(1:361692)=# select * from bench_shuffle_search(0, 2 * 1000 *\n> 1000, true);\n> 2022-10-14 11:33:51.556 JST [361692] LOG: num_keys = 999654, height =\n> 2, n4 = 1, n16 = 35610, n32 = 26889, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\n> array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 14893056 | 179937720 | 90 |\n> | 1536 |\n> (1 row)\n>\n> 1748250497 branches:u # 1.137 G/sec\n> 28125016 branch-misses:u # 1.61% of all branches\n>\n> * w/ all patches\n> postgres(1:360358)=# select * from bench_seq_search(0, 1 * 1000 * 1000, false);\n> 2022-10-14 11:29:27.232 JST [360358] LOG: num_keys = 1000000, height\n> = 2, n4 = 0, n32 = 31251, n128 = 1, n256 = 122\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\n> array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 1000000 | 10199040 | 180000000 | 81 |\n> | 432 |\n> (1 row)\n>\n> 1380062209 branches:u # 3.185 G/sec\n> 1066 branch-misses:u # 0.00% of all branches\n>\n> postgres(1:360358)=# select * from bench_seq_search(0, 2 * 1000 * 1000, true);\n> 2022-10-14 11:29:46.380 JST [360358] LOG: num_keys = 999654, height =\n> 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\n> array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 20381696 | 179937720 | 88 |\n> | 438 |\n> (1 row)\n>\n> 
1379640815 branches:u # 3.133 G/sec\n> 1332 branch-misses:u # 0.00% of all branches\n>\n> postgres(1:360358)=# select * from bench_shuffle_search(0, 1 * 1000 *\n> 1000, false);\n> 2022-10-14 11:30:00.943 JST [360358] LOG: num_keys = 1000000, height\n> = 2, n4 = 0, n32 = 31251, n128 = 1, n256 = 122\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\n> array_load_ms | rt_search_ms | array_serach_ms\n> ---------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 1000000 | 10199040 | 180000000 | 81 |\n> | 994 |\n> (1 row)\n>\n> 1380062386 branches:u # 1.386 G/sec\n> 18368 branch-misses:u # 0.00% of all branches\n>\n> postgres(1:360358)=# select * from bench_shuffle_search(0, 2 * 1000 *\n> 1000, true);\n> 2022-10-14 11:30:15.944 JST [360358] LOG: num_keys = 999654, height =\n> 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\n> NOTICE: sleeping for 2 seconds...\n> nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\n> array_load_ms | rt_search_ms | array_serach_ms\n> --------+------------------+---------------------+------------+---------------+--------------+-----------------\n> 999654 | 20381696 | 179937720 | 88 |\n> | 1098 |\n> (1 row)\n>\n> 1379641503 branches:u # 1.254 G/sec\n> 18973 branch-misses:u # 0.00% of all branches\n>\n> > I haven't looked at insert/load performance carefully, but it's clear it suffers from the same amnesia. prepare_node_for_insert() branches based on the kind. If it must call rt_node_grow(), that function has no idea where it came from and must branch again. When prepare_node_for_insert() returns we again have no idea what the kind is, so must branch again. 
And if we are one of the three linear-search nodes, we later do another function call, where we encounter a 5-way jump table because the caller could be anything at all.\n> >\n> > Some of this could be worked around with always-inline functions to which we pass a const node kind, and let the compiler get rid of the branches etc. But many cases are probably not even worth doing that. For example, I don't think prepare_node_for_insert() is a useful abstraction to begin with. It returns an index, but only for linear nodes. Lookup nodes get a return value of zero. There is not enough commonality here.\n>\n> Agreed.\n>\n> >\n> > Along the same lines, there are a number of places that have branches as a consequence of treating inner nodes and leaves with the same api:\n> >\n> > rt_node_iterate_next\n> > chunk_array_node_get_slot\n> > node_128/256_get_slot\n> > rt_node_search\n> >\n> > I'm leaning towards splitting these out into specialized functions for each inner and leaf. This is a bit painful for the last one, but perhaps if we are resigned to templating the shared-mem case, maybe we can template some of the inner/leaf stuff. Something to think about for later, but for now I believe we have to accept some code duplication as a prerequisite for decent performance as well as readability.\n>\n> Agreed.\n>\n> >\n> > For the next steps, we need to proceed cautiously because there is a lot in the air at the moment. Here are some aspects I would find desirable. If there are impracticalities I haven't thought of, we can discuss further. I don't pretend to know the practical consequences of every change I mention.\n> >\n> > - If you have started coding the shared memory case, I'd advise to continue so we can see what that looks like. 
If that has not gotten beyond the design stage, I'd like to first see an attempt at tearing down some of the clumsier abstractions in the current patch.\n> > - As a \"smoke test\", there should ideally be nothing as general as rt_node_get_children/values(). We should ideally always know what kind we are if we found out earlier.\n> > - For distinguishing between linear nodes, perhaps some always-inline functions can help hide details. But at the same time, trying to treat them the same is not always worthwhile.\n> > - Start to separate treatment of inner/leaves and see how it goes.\n>\n> Since I've not started coding the shared memory case seriously, I'm\n> going to start with eliminating abstractions and splitting the\n> treatment of inner and leaf nodes.\n\nI've attached updated PoC patches for discussion and cfbot. From the\nprevious version, I mainly changed the following things:\n\n* Separate treatment of inner and leaf nodes\n* Pack both the node kind and node count to an uint16 value.\n\nI've also made a change in functions in bench_radix_tree test module:\nthe third argument of bench_seq/shuffle_search() is a flag to turn on\nand off the randomizes block selection. 
The results of performance\ntests in my environment are:\n\npostgres(1:1665989)=# select * from bench_seq_search(0, 1* 1000 * 1000, false);\n2022-10-24 14:29:40.705 JST [1665989] LOG: num_keys = 1000000, height\n= 2, n4 = 0, n32 = 31251, n128 = 1, n256 = 122\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 9871104 | 180000000 | 65 |\n | 248 |\n(1 row)\n\npostgres(1:1665989)=# select * from bench_seq_search(0, 2* 1000 * 1000, true);\n2022-10-24 14:29:47.999 JST [1665989] LOG: num_keys = 999654, height\n= 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 19680736 | 179937720 | 71 |\n | 237 |\n(1 row)\n\npostgres(1:1665989)=# select * from bench_shuffle_search(0, 1 * 1000 *\n1000, false);\n2022-10-24 14:29:55.955 JST [1665989] LOG: num_keys = 1000000, height\n= 2, n4 = 0, n32 = 31251, n128 = 1, n256 = 122\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 9871104 | 180000000 | 65 |\n | 641 |\n(1 row)\n\npostgres(1:1665989)=# select * from bench_shuffle_search(0, 2 * 1000 *\n1000, true);\n2022-10-24 14:30:04.140 JST [1665989] LOG: num_keys = 999654, height\n= 2, n4 = 1, n32 = 62499, n128 = 1, n256 = 245\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 19680736 | 179937720 | 71 |\n | 654 
|\n(1 row)\n\nI've not done SIMD part seriously yet. But overall the performance\nseems good so far. If we agree with the current approach, I think we\ncan proceed with the verification of decoupling node sizes from node\nkind. And I'll investigate DSA support.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 24 Oct 2022 14:53:36 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Oct 24, 2022 at 12:54 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> I've attached updated PoC patches for discussion and cfbot. From the\n> previous version, I mainly changed the following things:\n>\n> * Separate treatment of inner and leaf nodes\n\nOverall, this looks much better!\n\n> * Pack both the node kind and node count to an uint16 value.\n\nFor this, I did mention a bitfield earlier as something we \"could\" do, but\nit wasn't clear we should. After looking again at the node types, I must\nnot have thought through this at all. Storing one byte instead of four for\nthe full enum is a good step, but saving one more byte usually doesn't buy\nanything because of padding, with a few exceptions like this example:\n\nnode4: 4 + 4 + 4*8 = 40\nnode4: 5 + 4+(7) + 4*8 = 48 bytes\n\nEven there, I'd rather not spend the extra cycles to access the members.\nAnd with my idea of decoupling size classes from kind, the variable-sized\nkinds will require another byte to store \"capacity\". Then, even if the kind\ngets encoded in a pointer tag, we'll still have 5 bytes in the base type.\nSo I think we should assume 5 bytes from the start. (Might be 6 temporarily\nif I work on size decoupling first).\n\n(Side note, if you have occasion to use bitfields again in the future, C99\nhas syntactic support for them, so no need to write your own\nshifting/masking code).\n\n> I've not done SIMD part seriously yet. 
But overall the performance\n> seems good so far. If we agree with the current approach, I think we\n> can proceed with the verification of decoupling node sizes from node\n> kind. And I'll investigate DSA support.\n\nSounds good. I have some additional comments about v7, and after these are\naddressed, we can proceed independently with the above two items. Seeing\nthe DSA work will also inform me how invasive pointer tagging will be.\nThere will still be some performance tuning and cosmetic work, but it's\ngetting closer.\n\n-------------------------\n0001:\n\n+#ifndef USE_NO_SIMD\n+#include \"port/pg_bitutils.h\"\n+#endif\n\nLeftover from an earlier version?\n\n+static inline int vector8_find(const Vector8 v, const uint8 c);\n+static inline int vector8_find_ge(const Vector8 v, const uint8 c);\n\nLeftovers, causing compiler warnings. (Also see new variable shadow warning)\n\n+#else /* USE_NO_SIMD */\n+ Vector8 r = 0;\n+ uint8 *rp = (uint8 *) &r;\n+\n+ for (Size i = 0; i < sizeof(Vector8); i++)\n+ rp[i] = Min(((const uint8 *) &v1)[i], ((const uint8 *) &v2)[i]);\n+\n+ return r;\n+#endif\n\nAs I mentioned a couple versions ago, this style is really awkward, and\npotential non-SIMD callers will be better off writing their own byte-wise\nloop rather than using this API. Especially since the \"min\" function exists\nonly as a workaround for lack of unsigned comparison in (at least) SSE2.\nThere is one existing function in this file with that idiom for non-assert\ncode (for completeness), but even there, inputs of current interest to us\nuse the uint64 algorithm.\n\n0002:\n\n+ /* XXX: should not to use vector8_highbit_mask */\n+ bitfield = vector8_highbit_mask(cmp1) | (vector8_highbit_mask(cmp2) <<\nsizeof(Vector8));\n\nHmm?\n\n+/*\n+ * Return index of the first element in chunks in the given node that is\ngreater\n+ * than or equal to 'key'. 
Return -1 if there is no such element.\n+ */\n+static inline int\n+node_32_search_ge(rt_node_base_32 *node, uint8 chunk)\n\nThe caller must now have logic for inserting at the end:\n\n+ int insertpos = node_32_search_ge((rt_node_base_32 *) n32, chunk);\n+ int16 count = NODE_GET_COUNT(n32);\n+\n+ if (insertpos < 0)\n+ insertpos = count; /* insert to the tail */\n\nIt would be a bit more clear if node_*_search_ge() always returns the\nposition we need (see the prototype for example). In fact, these functions\nare probably better named node*_get_insertpos().\n\n+ if (likely(NODE_HAS_FREE_SLOT(n128)))\n+ {\n+ node_inner_128_insert(n128, chunk, child);\n+ break;\n+ }\n+\n+ /* grow node from 128 to 256 */\n\nWe want all the node-growing code to be pushed down to the bottom so that\nall branches of the hot path are close together. This provides better\nlocality for the CPU frontend. Looking at the assembly, the above doesn't\nhave the desired effect, so we need to write like this (also see prototype):\n\nif (unlikely( ! has-free-slot))\n grow-node;\nelse\n{\n ...;\n break;\n}\n/* FALLTHROUGH */\n\n+ /* Descend the tree until a leaf node */\n+ while (shift >= 0)\n+ {\n+ rt_node *child;\n+\n+ if (NODE_IS_LEAF(node))\n+ break;\n+\n+ if (!rt_node_search_inner(node, key, RT_ACTION_FIND, &child))\n+ child = rt_node_add_new_child(tree, parent, node, key);\n+\n+ Assert(child);\n+\n+ parent = node;\n+ node = child;\n+ shift -= RT_NODE_SPAN;\n+ }\n\nNote that if we have to call rt_node_add_new_child(), each successive loop\niteration must search it and find nothing there (the prototype had a\nseparate function to handle this). Maybe it's not that critical yet, but\nsomething to keep in mind as we proceed. 
Maybe a comment about it to remind\nus.\n\n+ /* there is no key to delete */\n+ if (!rt_node_search_leaf(node, key, RT_ACTION_FIND, NULL))\n+ return false;\n+\n+ /* Update the statistics */\n+ tree->num_keys--;\n+\n+ /*\n+ * Delete the key from the leaf node and recursively delete the key in\n+ * inner nodes if necessary.\n+ */\n+ Assert(NODE_IS_LEAF(stack[level]));\n+ while (level >= 0)\n+ {\n+ rt_node *node = stack[level--];\n+\n+ if (NODE_IS_LEAF(node))\n+ rt_node_search_leaf(node, key, RT_ACTION_DELETE, NULL);\n+ else\n+ rt_node_search_inner(node, key, RT_ACTION_DELETE, NULL);\n+\n+ /* If the node didn't become empty, we stop deleting the key */\n+ if (!NODE_IS_EMPTY(node))\n+ break;\n+\n+ /* The node became empty */\n+ rt_free_node(tree, node);\n+ }\n\nHere we call rt_node_search_leaf() twice -- once to check for existence,\nand once to delete. All three search calls are inlined, so this wastes\nspace. Let's try to delete the leaf, return if not found, otherwise handle\nthe leaf bookkeepping and loop over the inner nodes. 
This might require\nsome duplication of code.\n\n+ndoe_inner_128_update(rt_node_inner_128 *node, uint8 chunk, rt_node *child)\n\nSpelling\n\n+static inline void\n+chunk_children_array_copy(uint8 *src_chunks, rt_node **src_children,\n+ uint8 *dst_chunks, rt_node **dst_children, int count)\n+{\n+ memcpy(dst_chunks, src_chunks, sizeof(uint8) * count);\n+ memcpy(dst_children, src_children, sizeof(rt_node *) * count);\n+}\n\ngcc generates better code with something like this (but not hard-coded) at\nthe top:\n\n if (count > 4)\n pg_unreachable();\n\nThis would have to change when we implement shrinking of nodes, but might\nstill be useful.\n\n+ if (!rt_node_search_leaf(node, key, RT_ACTION_FIND, value_p))\n+ return false;\n+\n+ return true;\n\nMaybe just \"return rt_node_search_leaf(...)\" ?\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Oct 24, 2022 at 12:54 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> I've attached updated PoC patches for discussion and cfbot. From the> previous version, I mainly changed the following things:>> * Separate treatment of inner and leaf nodesOverall, this looks much better!> * Pack both the node kind and node count to an uint16 value.For this, I did mention a bitfield earlier as something we \"could\" do, but it wasn't clear we should. After looking again at the node types, I must not have thought through this at all. Storing one byte instead of four for the full enum is a good step, but saving one more byte usually doesn't buy anything because of padding, with a few exceptions like this example:node4:   4 +  4           +  4*8 =   40node4:   5 +  4+(7)       +  4*8 =   48 bytesEven there, I'd rather not spend the extra cycles to access the members. And with my idea of decoupling size classes from kind, the variable-sized kinds will require another byte to store \"capacity\". Then, even if the kind gets encoded in a pointer tag, we'll still have 5 bytes in the base type. 
So I think we should assume 5 bytes from the start. (Might be 6 temporarily if I work on size decoupling first).(Side note, if you have occasion to use bitfields again in the future, C99 has syntactic support for them, so no need to write your own shifting/masking code).> I've not done SIMD part seriously yet. But overall the performance> seems good so far. If we agree with the current approach, I think we> can proceed with the verification of decoupling node sizes from node> kind. And I'll investigate DSA support.Sounds good. I have some additional comments about v7, and after these are addressed, we can proceed independently with the above two items. Seeing the DSA work will also inform me how invasive pointer tagging will be. There will still be some performance tuning and cosmetic work, but it's getting closer.-------------------------0001:+#ifndef USE_NO_SIMD+#include \"port/pg_bitutils.h\"+#endifLeftover from an earlier version?+static inline int vector8_find(const Vector8 v, const uint8 c);+static inline int vector8_find_ge(const Vector8 v, const uint8 c);Leftovers, causing compiler warnings. (Also see new variable shadow warning)+#else /* USE_NO_SIMD */+\tVector8 r = 0;+\tuint8 *rp = (uint8 *) &r;++\tfor (Size i = 0; i < sizeof(Vector8); i++)+\t\trp[i] = Min(((const uint8 *) &v1)[i], ((const uint8 *) &v2)[i]);++\treturn r;+#endifAs I mentioned a couple versions ago, this style is really awkward, and potential non-SIMD callers will be better off writing their own byte-wise loop rather than using this API. Especially since the \"min\" function exists only as a workaround for lack of unsigned comparison in (at least) SSE2. 
There is one existing function in this file with that idiom for non-assert code (for completeness), but even there, inputs of current interest to us use the uint64 algorithm.0002:+\t/* XXX: should not to use vector8_highbit_mask */+\tbitfield = vector8_highbit_mask(cmp1) | (vector8_highbit_mask(cmp2) << sizeof(Vector8));Hmm?+/*+ * Return index of the first element in chunks in the given node that is greater+ * than or equal to 'key'.  Return -1 if there is no such element.+ */+static inline int+node_32_search_ge(rt_node_base_32 *node, uint8 chunk)The caller must now have logic for inserting at the end:+\t\t\t\t\tint\tinsertpos = node_32_search_ge((rt_node_base_32 *) n32, chunk);+\t\t\t\t\tint16 count = NODE_GET_COUNT(n32);++\t\t\t\t\tif (insertpos < 0)+\t\t\t\t\t\tinsertpos = count; /* insert to the tail */It would be a bit more clear if node_*_search_ge() always returns the position we need (see the prototype for example). In fact, these functions are probably better named node*_get_insertpos().+\t\t\t\tif (likely(NODE_HAS_FREE_SLOT(n128)))+\t\t\t\t{+\t\t\t\t\tnode_inner_128_insert(n128, chunk, child);+\t\t\t\t\tbreak;+\t\t\t\t}++\t\t\t\t/* grow node from 128 to 256 */We want all the node-growing code to be pushed down to the bottom so that all branches of the hot path are close together. This provides better locality for the CPU frontend. Looking at the assembly, the above doesn't have the desired effect, so we need to write like this (also see prototype):if (unlikely( ! 
has-free-slot))  grow-node;else{  ...;  break;}/* FALLTHROUGH */+ /* Descend the tree until a leaf node */+ while (shift >= 0)+ {+   rt_node    *child;++   if (NODE_IS_LEAF(node))+     break;++   if (!rt_node_search_inner(node, key, RT_ACTION_FIND, &child))+     child = rt_node_add_new_child(tree, parent, node, key);++   Assert(child);++   parent = node;+   node = child;+   shift -= RT_NODE_SPAN;+ }Note that if we have to call rt_node_add_new_child(), each successive loop iteration must search it and find nothing there (the prototype had a separate function to handle this). Maybe it's not that critical yet, but something to keep in mind as we proceed. Maybe a comment about it to remind us.+ /* there is no key to delete */+ if (!rt_node_search_leaf(node, key, RT_ACTION_FIND, NULL))+   return false;++ /* Update the statistics */+ tree->num_keys--;++ /*+  * Delete the key from the leaf node and recursively delete the key in+  * inner nodes if necessary.+  */+ Assert(NODE_IS_LEAF(stack[level]));+ while (level >= 0)+ {+   rt_node    *node = stack[level--];++   if (NODE_IS_LEAF(node))+     rt_node_search_leaf(node, key, RT_ACTION_DELETE, NULL);+   else+     rt_node_search_inner(node, key, RT_ACTION_DELETE, NULL);++   /* If the node didn't become empty, we stop deleting the key */+   if (!NODE_IS_EMPTY(node))+     break;++   /* The node became empty */+   rt_free_node(tree, node);+ }Here we call rt_node_search_leaf() twice -- once to check for existence, and once to delete. All three search calls are inlined, so this wastes space. Let's try to delete the leaf, return if not found, otherwise handle the leaf bookkeepping and loop over the inner nodes. 
This might require some duplication of code.+ndoe_inner_128_update(rt_node_inner_128 *node, uint8 chunk, rt_node *child)Spelling+static inline void+chunk_children_array_copy(uint8 *src_chunks, rt_node **src_children,+             uint8 *dst_chunks, rt_node **dst_children, int count)+{+ memcpy(dst_chunks, src_chunks, sizeof(uint8) * count);+ memcpy(dst_children, src_children, sizeof(rt_node *) * count);+}gcc generates better code with something like this (but not hard-coded) at the top:    if (count > 4)        pg_unreachable();This would have to change when we implement shrinking of nodes, but might still be useful.+ if (!rt_node_search_leaf(node, key, RT_ACTION_FIND, value_p))+   return false;++ return true;Maybe just \"return rt_node_search_leaf(...)\" ?-- John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 26 Oct 2022 18:06:43 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Oct 26, 2022 at 8:06 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Oct 24, 2022 at 12:54 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I've attached updated PoC patches for discussion and cfbot. From the\n> > previous version, I mainly changed the following things:\n> >\n\nThank you for the comments!\n\n> > * Separate treatment of inner and leaf nodes\n>\n> Overall, this looks much better!\n>\n> > * Pack both the node kind and node count to an uint16 value.\n>\n> For this, I did mention a bitfield earlier as something we \"could\" do, but it wasn't clear we should. After looking again at the node types, I must not have thought through this at all. 
Storing one byte instead of four for the full enum is a good step, but saving one more byte usually doesn't buy anything because of padding, with a few exceptions like this example:\n>\n> node4: 4 + 4 + 4*8 = 40\n> node4: 5 + 4+(7) + 4*8 = 48 bytes\n>\n> Even there, I'd rather not spend the extra cycles to access the members. And with my idea of decoupling size classes from kind, the variable-sized kinds will require another byte to store \"capacity\". Then, even if the kind gets encoded in a pointer tag, we'll still have 5 bytes in the base type. So I think we should assume 5 bytes from the start. (Might be 6 temporarily if I work on size decoupling first).\n\nTrue. I'm going to start with 6 bytes and will consider reducing it to\n5 bytes. Encoding the kind in a pointer tag could be tricky given DSA\nsupport so currently I'm thinking to pack the node kind and node\ncapacity classes to uint8.\n\n>\n> (Side note, if you have occasion to use bitfields again in the future, C99 has syntactic support for them, so no need to write your own shifting/masking code).\n\nThanks!\n\n>\n> > I've not done SIMD part seriously yet. But overall the performance\n> > seems good so far. If we agree with the current approach, I think we\n> > can proceed with the verification of decoupling node sizes from node\n> > kind. And I'll investigate DSA support.\n>\n> Sounds good. I have some additional comments about v7, and after these are addressed, we can proceed independently with the above two items. Seeing the DSA work will also inform me how invasive pointer tagging will be. There will still be some performance tuning and cosmetic work, but it's getting closer.\n>\n\nI've made some progress on investigating DSA support. I've written\ndraft patch for that and regression tests passed. 
I'll share it as a\nseparate patch for discussion with v8 radix tree patch.\n\nWhile implementing DSA support, I realized that we may not need to use\npointer tagging to distinguish between backend-local address or\ndsa_pointer. In order to get a backend-local address from dsa_pointer,\nwe need to pass dsa_area like:\n\nnode = dsa_get_address(tree->dsa, node_dp);\n\nAs shown above, the dsa area used by the shared radix tree is stored\nin radix_tree struct, so we can know whether the radix tree is shared\nor not by checking (tree->dsa == NULL). That is, if it's shared we use\na pointer to radix tree node as dsa_pointer, and if not we use a\npointer as a backend-local pointer. We don't need to encode something\nin a pointer.\n\n> -------------------------\n> 0001:\n>\n> +#ifndef USE_NO_SIMD\n> +#include \"port/pg_bitutils.h\"\n> +#endif\n>\n> Leftover from an earlier version?\n>\n> +static inline int vector8_find(const Vector8 v, const uint8 c);\n> +static inline int vector8_find_ge(const Vector8 v, const uint8 c);\n>\n> Leftovers, causing compiler warnings. (Also see new variable shadow warning)\n\nWill fix.\n\n>\n> +#else /* USE_NO_SIMD */\n> + Vector8 r = 0;\n> + uint8 *rp = (uint8 *) &r;\n> +\n> + for (Size i = 0; i < sizeof(Vector8); i++)\n> + rp[i] = Min(((const uint8 *) &v1)[i], ((const uint8 *) &v2)[i]);\n> +\n> + return r;\n> +#endif\n>\n> As I mentioned a couple versions ago, this style is really awkward, and potential non-SIMD callers will be better off writing their own byte-wise loop rather than using this API. Especially since the \"min\" function exists only as a workaround for lack of unsigned comparison in (at least) SSE2. There is one existing function in this file with that idiom for non-assert code (for completeness), but even there, inputs of current interest to us use the uint64 algorithm.\n\nAgreed. 
Will remove non-SIMD code.\n\n>\n> 0002:\n>\n> + /* XXX: should not to use vector8_highbit_mask */\n> + bitfield = vector8_highbit_mask(cmp1) | (vector8_highbit_mask(cmp2) << sizeof(Vector8));\n>\n> Hmm?\n\nIt's my outdated memo, will remove.\n\n>\n> +/*\n> + * Return index of the first element in chunks in the given node that is greater\n> + * than or equal to 'key'. Return -1 if there is no such element.\n> + */\n> +static inline int\n> +node_32_search_ge(rt_node_base_32 *node, uint8 chunk)\n>\n> The caller must now have logic for inserting at the end:\n>\n> + int insertpos = node_32_search_ge((rt_node_base_32 *) n32, chunk);\n> + int16 count = NODE_GET_COUNT(n32);\n> +\n> + if (insertpos < 0)\n> + insertpos = count; /* insert to the tail */\n>\n> It would be a bit more clear if node_*_search_ge() always returns the position we need (see the prototype for example). In fact, these functions are probably better named node*_get_insertpos().\n\nAgreed.\n\n>\n> + if (likely(NODE_HAS_FREE_SLOT(n128)))\n> + {\n> + node_inner_128_insert(n128, chunk, child);\n> + break;\n> + }\n> +\n> + /* grow node from 128 to 256 */\n>\n> We want all the node-growing code to be pushed down to the bottom so that all branches of the hot path are close together. This provides better locality for the CPU frontend. Looking at the assembly, the above doesn't have the desired effect, so we need to write like this (also see prototype):\n>\n> if (unlikely( ! has-free-slot))\n> grow-node;\n> else\n> {\n> ...;\n> break;\n> }\n> /* FALLTHROUGH */\n\nGood point. 
Will change.\n\n>\n> + /* Descend the tree until a leaf node */\n> + while (shift >= 0)\n> + {\n> + rt_node *child;\n> +\n> + if (NODE_IS_LEAF(node))\n> + break;\n> +\n> + if (!rt_node_search_inner(node, key, RT_ACTION_FIND, &child))\n> + child = rt_node_add_new_child(tree, parent, node, key);\n> +\n> + Assert(child);\n> +\n> + parent = node;\n> + node = child;\n> + shift -= RT_NODE_SPAN;\n> + }\n>\n> Note that if we have to call rt_node_add_new_child(), each successive loop iteration must search it and find nothing there (the prototype had a separate function to handle this). Maybe it's not that critical yet, but something to keep in mind as we proceed. Maybe a comment about it to remind us.\n\nAgreed. Currently rt_extend() is used to add upper nodes but probably\nwe need another function to add lower nodes for this case.\n\n>\n> + /* there is no key to delete */\n> + if (!rt_node_search_leaf(node, key, RT_ACTION_FIND, NULL))\n> + return false;\n> +\n> + /* Update the statistics */\n> + tree->num_keys--;\n> +\n> + /*\n> + * Delete the key from the leaf node and recursively delete the key in\n> + * inner nodes if necessary.\n> + */\n> + Assert(NODE_IS_LEAF(stack[level]));\n> + while (level >= 0)\n> + {\n> + rt_node *node = stack[level--];\n> +\n> + if (NODE_IS_LEAF(node))\n> + rt_node_search_leaf(node, key, RT_ACTION_DELETE, NULL);\n> + else\n> + rt_node_search_inner(node, key, RT_ACTION_DELETE, NULL);\n> +\n> + /* If the node didn't become empty, we stop deleting the key */\n> + if (!NODE_IS_EMPTY(node))\n> + break;\n> +\n> + /* The node became empty */\n> + rt_free_node(tree, node);\n> + }\n>\n> Here we call rt_node_search_leaf() twice -- once to check for existence, and once to delete. All three search calls are inlined, so this wastes space. Let's try to delete the leaf, return if not found, otherwise handle the leaf bookkeepping and loop over the inner nodes. 
This might require some duplication of code.\n\nAgreed.\n\n>\n> +ndoe_inner_128_update(rt_node_inner_128 *node, uint8 chunk, rt_node *child)\n>\n> Spelling\n\nWIll fix.\n\n>\n> +static inline void\n> +chunk_children_array_copy(uint8 *src_chunks, rt_node **src_children,\n> + uint8 *dst_chunks, rt_node **dst_children, int count)\n> +{\n> + memcpy(dst_chunks, src_chunks, sizeof(uint8) * count);\n> + memcpy(dst_children, src_children, sizeof(rt_node *) * count);\n> +}\n>\n> gcc generates better code with something like this (but not hard-coded) at the top:\n>\n> if (count > 4)\n> pg_unreachable();\n\nAgreed.\n\n>\n> This would have to change when we implement shrinking of nodes, but might still be useful.\n>\n> + if (!rt_node_search_leaf(node, key, RT_ACTION_FIND, value_p))\n> + return false;\n> +\n> + return true;\n>\n> Maybe just \"return rt_node_search_leaf(...)\" ?\n\nAgreed.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 27 Oct 2022 11:11:15 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Oct 27, 2022 at 9:11 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> True. I'm going to start with 6 bytes and will consider reducing it to\n> 5 bytes.\n\nOkay, let's plan on 6 for now, so we have the worst-case sizes up front. As\ndiscussed, I will attempt the size class decoupling after v8 and see how it\ngoes.\n\n> Encoding the kind in a pointer tag could be tricky given DSA\n\nIf it turns out to be unworkable, that's life. If it's just tricky, that\ncan certainly be put off for future work. I hope to at least test it out\nwith local memory.\n\n> support so currently I'm thinking to pack the node kind and node\n> capacity classes to uint8.\n\nThat won't work, if we need 128 for capacity, leaving no bits left. 
I want\nthe capacity to be a number we can directly compare with the count (we\nwon't ever need to store 256 because that node will never grow). Also,\nfurther to my last message, we need to access the kind quickly, without\nmore cycles.\n\n> I've made some progress on investigating DSA support. I've written\n> draft patch for that and regression tests passed. I'll share it as a\n> separate patch for discussion with v8 radix tree patch.\n\nGreat!\n\n> While implementing DSA support, I realized that we may not need to use\n> pointer tagging to distinguish between backend-local address or\n> dsa_pointer. In order to get a backend-local address from dsa_pointer,\n> we need to pass dsa_area like:\n\nI was not clear -- when I see how much code changes to accommodate DSA\npointers, I imagine I will pretty much know the places that would be\naffected by tagging the pointer with the node kind.\n\nSpeaking of tests, there is currently no Meson support, but tests pass\nbecause this library is not used anywhere in the backend yet, and\napparently the CI Meson builds don't know to run the regression test? That\nwill need to be done too. However, it's okay to keep the benchmarking\nmodule in autoconf, since it won't be committed.\n\n> > +static inline void\n> > +chunk_children_array_copy(uint8 *src_chunks, rt_node **src_children,\n> > + uint8 *dst_chunks, rt_node **dst_children, int count)\n> > +{\n> > + memcpy(dst_chunks, src_chunks, sizeof(uint8) * count);\n> > + memcpy(dst_children, src_children, sizeof(rt_node *) * count);\n> > +}\n> >\n> > gcc generates better code with something like this (but not hard-coded)\nat the top:\n> >\n> > if (count > 4)\n> > pg_unreachable();\n\nActually it just now occurred to me there's a bigger issue here: *We* know\nthis code can only get here iff count==4, so why doesn't the compiler know\nthat? 
I believe it boils down to\n\nstatic rt_node_kind_info_elem rt_node_kind_info[RT_NODE_KIND_COUNT] = {\n\nIn the assembly, I see it checks if there is room in the node by doing a\nruntime lookup in this array, which is not constant. This might not be\nimportant just yet, because I want to base the check on the proposed node\ncapacity instead, but I mention it as a reminder to us to make sure we take\nall opportunities for the compiler to propagate constants.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Oct 27, 2022 at 9:11 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> True. I'm going to start with 6 bytes and will consider reducing it to> 5 bytes.Okay, let's plan on 6 for now, so we have the worst-case sizes up front. As discussed, I will attempt the size class decoupling after v8 and see how it goes.> Encoding the kind in a pointer tag could be tricky given DSAIf it turns out to be unworkable, that's life. If it's just tricky, that can certainly be put off for future work. I hope to at least test it out with local memory.> support so currently I'm thinking to pack the node kind and node> capacity classes to uint8.That won't work, if we need 128 for capacity, leaving no bits left. I want the capacity to be a number we can directly compare with the count (we won't ever need to store 256 because that node will never grow). Also, further to my last message, we need to access the kind quickly, without more cycles.> I've made some progress on investigating DSA support. I've written> draft patch for that and regression tests passed. I'll share it as a> separate patch for discussion with v8 radix tree patch.Great!> While implementing DSA support, I realized that we may not need to use> pointer tagging to distinguish between backend-local address or> dsa_pointer. 
In order to get a backend-local address from dsa_pointer,> we need to pass dsa_area like:I was not clear -- when I see how much code changes to accommodate DSA pointers, I imagine I will pretty much know the places that would be affected by tagging the pointer with the node kind.Speaking of tests, there is currently no Meson support, but tests pass because this library is not used anywhere in the backend yet, and apparently the CI Meson builds don't know to run the regression test? That will need to be done too. However, it's okay to keep the benchmarking module in autoconf, since it won't be committed.> > +static inline void> > +chunk_children_array_copy(uint8 *src_chunks, rt_node **src_children,> > +             uint8 *dst_chunks, rt_node **dst_children, int count)> > +{> > + memcpy(dst_chunks, src_chunks, sizeof(uint8) * count);> > + memcpy(dst_children, src_children, sizeof(rt_node *) * count);> > +}> >> > gcc generates better code with something like this (but not hard-coded) at the top:> >> >     if (count > 4)> >         pg_unreachable();Actually it just now occurred to me there's a bigger issue here: *We* know this code can only get here iff count==4, so why doesn't the compiler know that? I believe it boils down tostatic rt_node_kind_info_elem rt_node_kind_info[RT_NODE_KIND_COUNT] = {In the assembly, I see it checks if there is room in the node by doing a runtime lookup in this array, which is not constant. 
This might not be important just yet, because I want to base the check on the proposed node capacity instead, but I mention it as a reminder to us to make sure we take all opportunities for the compiler to propagate constants.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 27 Oct 2022 10:21:40 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Oct 27, 2022 at 12:21 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Oct 27, 2022 at 9:11 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > True. I'm going to start with 6 bytes and will consider reducing it to\n> > 5 bytes.\n>\n> Okay, let's plan on 6 for now, so we have the worst-case sizes up front. As discussed, I will attempt the size class decoupling after v8 and see how it goes.\n>\n> > Encoding the kind in a pointer tag could be tricky given DSA\n>\n> If it turns out to be unworkable, that's life. If it's just tricky, that can certainly be put off for future work. I hope to at least test it out with local memory.\n>\n> > support so currently I'm thinking to pack the node kind and node\n> > capacity classes to uint8.\n>\n> That won't work, if we need 128 for capacity, leaving no bits left. I want the capacity to be a number we can directly compare with the count (we won't ever need to store 256 because that node will never grow). Also, further to my last message, we need to access the kind quickly, without more cycles.\n\nUnderstood.\n\n>\n> > I've made some progress on investigating DSA support. I've written\n> > draft patch for that and regression tests passed. I'll share it as a\n> > separate patch for discussion with v8 radix tree patch.\n>\n> Great!\n>\n> > While implementing DSA support, I realized that we may not need to use\n> > pointer tagging to distinguish between backend-local address or\n> > dsa_pointer. 
In order to get a backend-local address from dsa_pointer,\n> > we need to pass dsa_area like:\n>\n> I was not clear -- when I see how much code changes to accommodate DSA pointers, I imagine I will pretty much know the places that would be affected by tagging the pointer with the node kind.\n>\n> Speaking of tests, there is currently no Meson support, but tests pass because this library is not used anywhere in the backend yet, and apparently the CI Meson builds don't know to run the regression test? That will need to be done too. However, it's okay to keep the benchmarking module in autoconf, since it won't be committed.\n\nUpdated to support Meson.\n\n>\n> > > +static inline void\n> > > +chunk_children_array_copy(uint8 *src_chunks, rt_node **src_children,\n> > > + uint8 *dst_chunks, rt_node **dst_children, int count)\n> > > +{\n> > > + memcpy(dst_chunks, src_chunks, sizeof(uint8) * count);\n> > > + memcpy(dst_children, src_children, sizeof(rt_node *) * count);\n> > > +}\n> > >\n> > > gcc generates better code with something like this (but not hard-coded) at the top:\n> > >\n> > > if (count > 4)\n> > > pg_unreachable();\n>\n> Actually it just now occurred to me there's a bigger issue here: *We* know this code can only get here iff count==4, so why doesn't the compiler know that? I believe it boils down to\n>\n> static rt_node_kind_info_elem rt_node_kind_info[RT_NODE_KIND_COUNT] = {\n>\n> In the assembly, I see it checks if there is room in the node by doing a runtime lookup in this array, which is not constant. This might not be important just yet, because I want to base the check on the proposed node capacity instead, but I mention it as a reminder to us to make sure we take all opportunities for the compiler to propagate constants.\n\nI've attached v8 patches. 0001, 0002, and 0003 patches incorporated\nthe comments I got so far. 
0004 patch is a DSA support patch for PoC.\n\nIn 0004 patch, the basic idea is to use rt_node_ptr in all inner nodes\nto point its children, and we use rt_node_ptr as either rt_node* or\ndsa_pointer depending on whether the radix tree is shared or not (ie,\nby checking radix_tree->dsa == NULL). Regarding the performance, I've\nadded another boolean argument to bench_seq/shuffle_search(),\nspecifying whether to use the shared radix tree or not. Here are\nbenchmark results in my environment,\n\nselect * from bench_seq_search(0, 1* 1000 * 1000, false, false);\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 9871240 | 180000000 | 67 |\n | 241 |\n(1 row)\n\nselect * from bench_seq_search(0, 1* 1000 * 1000, false, true);\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 14680064 | 180000000 | 81 |\n | 483 |\n(1 row)\n\nselect * from bench_seq_search(0, 2* 1000 * 1000, true, false);\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 19680872 | 179937720 | 74 |\n | 235 |\n(1 row)\n\nselect * from bench_seq_search(0, 2* 1000 * 1000, true, true);\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 23068672 | 179937720 | 86 |\n | 445 |\n(1 row)\n\nselect * from bench_shuffle_search(0, 1* 1000 * 1000, false, false);\n nkeys | rt_mem_allocated | 
array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 9871240 | 180000000 | 67 |\n | 640 |\n(1 row)\n\nselect * from bench_shuffle_search(0, 1* 1000 * 1000, false, true);\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 14680064 | 180000000 | 81 |\n | 1002 |\n(1 row)\n\nselect * from bench_shuffle_search(0, 2* 1000 * 1000, true, false);\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 19680872 | 179937720 | 74 |\n | 697 |\n(1 row)\n\nselect * from bench_shuffle_search(0, 2* 1000 * 1000, true, true);\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 23068672 | 179937720 | 86 |\n | 1030 |\n(1 row)\n\nIn non-shared radix tree cases (the forth argument is false), I don't\nsee a visible performance degradation. On the other hand, in shared\nradix tree cases (the forth argument is true), I see visible overheads\nbecause of dsa_get_address().\n\nPlease note that the current shared radix tree implementation doesn't\nsupport any locking, so it cannot be read while written by someone.\nAlso, only one process can iterate over the shared radix tree. When it\ncomes to parallel vacuum, these don't become restriction as the leader\nprocess writes the radix tree while scanning heap and the radix tree\nis read by multiple processes while vacuuming indexes. 
And only the\nleader process can do heap vacuum by iterating the key-value pairs in\nthe radix tree. If we want to use it for other cases too, we would\nneed to support locking, RCU or something.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 31 Oct 2022 14:46:53 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Oct 31, 2022 at 12:47 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> I've attached v8 patches. 0001, 0002, and 0003 patches incorporated\n> the comments I got so far. 0004 patch is a DSA support patch for PoC.\n\nThanks for the new patchset. This is not a full review, but I have some\ncomments:\n\n0001 and 0002 look okay on a quick scan -- I will use this as a base for\nfurther work that we discussed. However, before I do so I'd like to request\nanother revision regarding the following:\n\n> In 0004 patch, the basic idea is to use rt_node_ptr in all inner nodes\n> to point its children, and we use rt_node_ptr as either rt_node* or\n> dsa_pointer depending on whether the radix tree is shared or not (ie,\n> by checking radix_tree->dsa == NULL).\n\n0004: Looks like a good start, but this patch has a large number of changes\nlike these, making it hard to read:\n\n- if (found && child_p)\n- *child_p = child;\n+ if (found && childp_p)\n+ *childp_p = childp;\n...\n rt_node_inner_32 *new32;\n+ rt_node_ptr new32p;\n\n /* grow node from 4 to 32 */\n- new32 = (rt_node_inner_32 *) rt_copy_node(tree, (rt_node *) n4,\n- RT_NODE_KIND_32);\n+ new32p = rt_copy_node(tree, (rt_node *) n4, RT_NODE_KIND_32);\n+ new32 = (rt_node_inner_32 *) node_ptr_get_local(tree, new32p);\n\nIt's difficult to keep in my head what all the variables refer to. 
I\nthought a bit about how to split this patch up to make this easier to read.\nHere's what I came up with:\n\ntypedef struct rt_node_ptr\n{\n uintptr_t encoded;\n rt_node * decoded;\n}\n\nNote that there is nothing about \"dsa or local\". That's deliberate. That\nway, we can use the \"encoded\" field for a tagged pointer as well, as I hope\nwe can do (at least for local pointers) in the future. So an intermediate\npatch would have \"static inline void\" functions node_ptr_encode() and\n node_ptr_decode(), which would only copy from one member to another. I\nsuspect that: 1. The actual DSA changes will be *much* smaller and easier\nto reason about. 2. Experimenting with tagged pointers will be easier.\n\nAlso, quick question: 0004 has a new function rt_node_update_inner() -- is\nthat necessary because of DSA?, or does this ideally belong in 0002? What's\nthe reason for it?\n\nRegarding the performance, I've\n> added another boolean argument to bench_seq/shuffle_search(),\n> specifying whether to use the shared radix tree or not. Here are\n> benchmark results in my environment,\n\n> [...]\n\n> In non-shared radix tree cases (the forth argument is false), I don't\n> see a visible performance degradation. On the other hand, in shared\n> radix tree cases (the forth argument is true), I see visible overheads\n> because of dsa_get_address().\n\nThanks, this is useful.\n\n> Please note that the current shared radix tree implementation doesn't\n> support any locking, so it cannot be read while written by someone.\n\nI think at the very least we need a global lock to enforce this.\n\n> Also, only one process can iterate over the shared radix tree. When it\n> comes to parallel vacuum, these don't become restriction as the leader\n> process writes the radix tree while scanning heap and the radix tree\n> is read by multiple processes while vacuuming indexes. And only the\n> leader process can do heap vacuum by iterating the key-value pairs in\n> the radix tree. 
If we want to use it for other cases too, we would\n> need to support locking, RCU or something.\n\nA useful exercise here is to think about what we'd need to do parallel heap\npruning. We don't need to go that far for v16 of course, but what's the\nsimplest thing we can do to make that possible? Other use cases can change\nto more sophisticated schemes if need be.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Oct 31, 2022 at 12:47 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> I've attached v8 patches. 0001, 0002, and 0003 patches incorporated> the comments I got so far. 0004 patch is a DSA support patch for PoC.Thanks for the new patchset. This is not a full review, but I have some comments:0001 and 0002 look okay on a quick scan -- I will use this as a base for further work that we discussed. However, before I do so I'd like to request another revision regarding the following:> In 0004 patch, the basic idea is to use rt_node_ptr in all inner nodes> to point its children, and we use rt_node_ptr as either rt_node* or> dsa_pointer depending on whether the radix tree is shared or not (ie,> by checking radix_tree->dsa == NULL). 0004: Looks like a good start, but this patch has a large number of changes like these, making it hard to read:-\tif (found && child_p)-\t\t*child_p = child;+\tif (found && childp_p)+\t\t*childp_p = childp;... \t\t\t\t\trt_node_inner_32 *new32;+\t\t\t\t\trt_node_ptr new32p;  \t\t\t\t\t/* grow node from 4 to 32 */-\t\t\t\t\tnew32 = (rt_node_inner_32 *) rt_copy_node(tree, (rt_node *) n4,-\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t  RT_NODE_KIND_32);+\t\t\t\t\tnew32p = rt_copy_node(tree, (rt_node *) n4, RT_NODE_KIND_32);+\t\t\t\t\tnew32 = (rt_node_inner_32 *) node_ptr_get_local(tree, new32p);It's difficult to keep in my head what all the variables refer to. I thought a bit about how to split this patch up to make this easier to read. 
Here's what I came up with:typedef struct rt_node_ptr{  uintptr_t encoded;  rt_node * decoded;}Note that there is nothing about \"dsa or local\". That's deliberate. That way, we can use the \"encoded\" field for a tagged pointer as well, as I hope we can do (at least for local pointers) in the future. So an intermediate patch would have \"static inline void\" functions  node_ptr_encode() and  node_ptr_decode(), which would only copy from one member to another. I suspect that: 1. The actual DSA changes will be *much* smaller and easier to reason about. 2. Experimenting with tagged pointers will be easier.Also, quick question: 0004 has a new function rt_node_update_inner() -- is that necessary because of DSA?, or does this ideally belong in 0002? What's the reason for it?Regarding the performance, I've> added another boolean argument to bench_seq/shuffle_search(),> specifying whether to use the shared radix tree or not. Here are> benchmark results in my environment,> [...] > In non-shared radix tree cases (the forth argument is false), I don't> see a visible performance degradation. On the other hand, in shared> radix tree cases (the forth argument is true), I see visible overheads> because of dsa_get_address().Thanks, this is useful.> Please note that the current shared radix tree implementation doesn't> support any locking, so it cannot be read while written by someone.I think at the very least we need a global lock to enforce this.> Also, only one process can iterate over the shared radix tree. When it> comes to parallel vacuum, these don't become restriction as the leader> process writes the radix tree while scanning heap and the radix tree> is read by multiple processes while vacuuming indexes. And only the> leader process can do heap vacuum by iterating the key-value pairs in> the radix tree. 
If we want to use it for other cases too, we would> need to support locking, RCU or something.A useful exercise here is to think about what we'd need to do parallel heap pruning. We don't need to go that far for v16 of course, but what's the simplest thing we can do to make that possible? Other use cases can change to more sophisticated schemes if need be.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 3 Nov 2022 11:59:33 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Nov 3, 2022 at 1:59 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Oct 31, 2022 at 12:47 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I've attached v8 patches. 0001, 0002, and 0003 patches incorporated\n> > the comments I got so far. 0004 patch is a DSA support patch for PoC.\n>\n> Thanks for the new patchset. This is not a full review, but I have some comments:\n>\n> 0001 and 0002 look okay on a quick scan -- I will use this as a base for further work that we discussed. 
However, before I do so I'd like to request another revision regarding the following:\n>\n> > In 0004 patch, the basic idea is to use rt_node_ptr in all inner nodes\n> > to point its children, and we use rt_node_ptr as either rt_node* or\n> > dsa_pointer depending on whether the radix tree is shared or not (ie,\n> > by checking radix_tree->dsa == NULL).\n>\n\nThank you for the comments!\n\n> 0004: Looks like a good start, but this patch has a large number of changes like these, making it hard to read:\n>\n> - if (found && child_p)\n> - *child_p = child;\n> + if (found && childp_p)\n> + *childp_p = childp;\n> ...\n> rt_node_inner_32 *new32;\n> + rt_node_ptr new32p;\n>\n> /* grow node from 4 to 32 */\n> - new32 = (rt_node_inner_32 *) rt_copy_node(tree, (rt_node *) n4,\n> - RT_NODE_KIND_32);\n> + new32p = rt_copy_node(tree, (rt_node *) n4, RT_NODE_KIND_32);\n> + new32 = (rt_node_inner_32 *) node_ptr_get_local(tree, new32p);\n>\n> It's difficult to keep in my head what all the variables refer to. I thought a bit about how to split this patch up to make this easier to read. Here's what I came up with:\n>\n> typedef struct rt_node_ptr\n> {\n> uintptr_t encoded;\n> rt_node * decoded;\n> }\n>\n> Note that there is nothing about \"dsa or local\". That's deliberate. That way, we can use the \"encoded\" field for a tagged pointer as well, as I hope we can do (at least for local pointers) in the future. So an intermediate patch would have \"static inline void\" functions node_ptr_encode() and node_ptr_decode(), which would only copy from one member to another. I suspect that: 1. The actual DSA changes will be *much* smaller and easier to reason about. 2. Experimenting with tagged pointers will be easier.\n\nGood idea. Will try in the next version patch.\n\n>\n> Also, quick question: 0004 has a new function rt_node_update_inner() -- is that necessary because of DSA?, or does this ideally belong in 0002? 
What's the reason for it?\n\nOh, this was needed once when initially I'm writing DSA support but\nthinking about it again now I think we can remove it and use\nrt_node_insert_inner() with parent = NULL instead.\n\n>\n> Regarding the performance, I've\n> > added another boolean argument to bench_seq/shuffle_search(),\n> > specifying whether to use the shared radix tree or not. Here are\n> > benchmark results in my environment,\n>\n> > [...]\n>\n> > In non-shared radix tree cases (the forth argument is false), I don't\n> > see a visible performance degradation. On the other hand, in shared\n> > radix tree cases (the forth argument is true), I see visible overheads\n> > because of dsa_get_address().\n>\n> Thanks, this is useful.\n>\n> > Please note that the current shared radix tree implementation doesn't\n> > support any locking, so it cannot be read while written by someone.\n>\n> I think at the very least we need a global lock to enforce this.\n>\n> > Also, only one process can iterate over the shared radix tree. When it\n> > comes to parallel vacuum, these don't become restriction as the leader\n> > process writes the radix tree while scanning heap and the radix tree\n> > is read by multiple processes while vacuuming indexes. And only the\n> > leader process can do heap vacuum by iterating the key-value pairs in\n> > the radix tree. If we want to use it for other cases too, we would\n> > need to support locking, RCU or something.\n>\n> A useful exercise here is to think about what we'd need to do parallel heap pruning. We don't need to go that far for v16 of course, but what's the simplest thing we can do to make that possible? Other use cases can change to more sophisticated schemes if need be.\n\nFor parallel heap pruning, multiple workers will insert key-value\npairs to the radix tree concurrently. 
The simplest solution would be a\nsingle lock to protect writes but the performance will not be good.\nAnother solution would be that we can divide the tables into multiple\nranges so that keys derived from TIDs are not conflicted with each\nother and have parallel workers process one or more ranges. That way,\nparallel vacuum workers can build *sub-trees* and the leader process\ncan merge them. In use cases of lazy vacuum, since the write phase and\nread phase are separated the readers don't need to worry about\nconcurrent updates.\n\nI've attached a draft patch for lazy vacuum integration that can be\napplied on top of v8 patches. The patch adds a new module called\nTIDStore, an efficient storage for TID backed by radix tree. Lazy\nvacuum and parallel vacuum use it instead of a TID array. The patch\nalso introduces rt_detach() that was missed in 0002 patch. It's a very\nrough patch but I hope it helps in considering lazy vacuum\nintegration, radix tree APIs, and shared radix tree functionality.\nThere are some TODOs:\n\n* We need to reset the TIDStore and therefore reset the radix tree. 
It\ncan easily be done by using MemoryContextReset() in non-shared radix\ntree cases, but in shared case, we need either to free all radix tree\nnodes recursively or introduce a way to release all allocated DSA\nmemory.\n\n* We need to limit the size of TIDStore (mainly radix_tree) in\nmaintenance_work_mem.\n\n* We need to change the counter-based information in\npg_stat_progress_vacuum such as max_dead_tuples and num_dead_tuplesn.\nI think it would be better to show maximum bytes we can collect TIDs\nand its usage instead.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Sat, 5 Nov 2022 00:24:23 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Nov 4, 2022 at 10:25 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> For parallel heap pruning, multiple workers will insert key-value\n> pairs to the radix tree concurrently. The simplest solution would be a\n> single lock to protect writes but the performance will not be good.\n> Another solution would be that we can divide the tables into multiple\n> ranges so that keys derived from TIDs are not conflicted with each\n> other and have parallel workers process one or more ranges. That way,\n> parallel vacuum workers can build *sub-trees* and the leader process\n> can merge them. In use cases of lazy vacuum, since the write phase and\n> read phase are separated the readers don't need to worry about\n> concurrent updates.\n\nIt's a good idea to use ranges for a different reason -- readahead. See\ncommit 56788d2156fc3, which aimed to improve readahead for sequential\nscans. It might work to use that as a model: Each worker prunes a range of\n64 pages, keeping the dead tids in a local array. At the end of the range:\nlock the tid store, enter the tids into the store, unlock, free the local\narray, and get the next range from the leader. 
It's possible contention\nwon't be too bad, and I suspect using small local arrays as-we-go would be\nfaster and use less memory than merging multiple sub-trees at the end.\n\n> I've attached a draft patch for lazy vacuum integration that can be\n> applied on top of v8 patches. The patch adds a new module called\n> TIDStore, an efficient storage for TID backed by radix tree. Lazy\n> vacuum and parallel vacuum use it instead of a TID array. The patch\n> also introduces rt_detach() that was missed in 0002 patch. It's a very\n> rough patch but I hope it helps in considering lazy vacuum\n> integration, radix tree APIs, and shared radix tree functionality.\n\nIt does help, good to see this.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Fri, Nov 4, 2022 at 10:25 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> For parallel heap pruning, multiple workers will insert key-value> pairs to the radix tree concurrently. The simplest solution would be a> single lock to protect writes but the performance will not be good.> Another solution would be that we can divide the tables into multiple> ranges so that keys derived from TIDs are not conflicted with each> other and have parallel workers process one or more ranges. That way,> parallel vacuum workers can build *sub-trees* and the leader process> can merge them. In use cases of lazy vacuum, since the write phase and> read phase are separated the readers don't need to worry about> concurrent updates.It's a good idea to use ranges for a different reason -- readahead. See commit 56788d2156fc3, which aimed to improve readahead for sequential scans. It might work to use that as a model: Each worker prunes a range of 64 pages, keeping the dead tids in a local array. At the end of the range: lock the tid store, enter the tids into the store, unlock, free the local array, and get the next range from the leader. 
It's possible contention won't be too bad, and I suspect using small local arrays as-we-go would be faster and use less memory than merging multiple sub-trees at the end.> I've attached a draft patch for lazy vacuum integration that can be> applied on top of v8 patches. The patch adds a new module called> TIDStore, an efficient storage for TID backed by radix tree. Lazy> vacuum and parallel vacuum use it instead of a TID array. The patch> also introduces rt_detach() that was missed in 0002 patch. It's a very> rough patch but I hope it helps in considering lazy vacuum> integration, radix tree APIs, and shared radix tree functionality.It does help, good to see this.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Sat, 5 Nov 2022 16:22:54 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sat, Nov 5, 2022 at 6:23 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Fri, Nov 4, 2022 at 10:25 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > For parallel heap pruning, multiple workers will insert key-value\n> > pairs to the radix tree concurrently. The simplest solution would be a\n> > single lock to protect writes but the performance will not be good.\n> > Another solution would be that we can divide the tables into multiple\n> > ranges so that keys derived from TIDs are not conflicted with each\n> > other and have parallel workers process one or more ranges. That way,\n> > parallel vacuum workers can build *sub-trees* and the leader process\n> > can merge them. In use cases of lazy vacuum, since the write phase and\n> > read phase are separated the readers don't need to worry about\n> > concurrent updates.\n>\n> It's a good idea to use ranges for a different reason -- readahead. See commit 56788d2156fc3, which aimed to improve readahead for sequential scans. 
It might work to use that as a model: Each worker prunes a range of 64 pages, keeping the dead tids in a local array. At the end of the range: lock the tid store, enter the tids into the store, unlock, free the local array, and get the next range from the leader. It's possible contention won't be too bad, and I suspect using small local arrays as-we-go would be faster and use less memory than merging multiple sub-trees at the end.\n\nSeems a promising idea. I think it might work well even in the current\nparallel vacuum (ie., single writer). I mean, I think we can have a\nsingle lwlock for shared cases in the first version. If the overhead\nof acquiring the lwlock per insertion of key-value is not negligible,\nwe might want to try this idea.\n\nApart from that, I'm going to incorporate the comments on 0004 patch\nand try a pointer tagging.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 8 Nov 2022 23:14:39 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Nov 4, 2022 at 8:25 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> For parallel heap pruning, multiple workers will insert key-value\n> pairs to the radix tree concurrently. The simplest solution would be a\n> single lock to protect writes but the performance will not be good.\n> Another solution would be that we can divide the tables into multiple\n> ranges so that keys derived from TIDs are not conflicted with each\n> other and have parallel workers process one or more ranges. That way,\n> parallel vacuum workers can build *sub-trees* and the leader process\n> can merge them. In use cases of lazy vacuum, since the write phase and\n> read phase are separated the readers don't need to worry about\n> concurrent updates.\n\nI think that the VM snapshot concept can eventually be used to\nimplement parallel heap pruning. 
Since every page that will become a\nscanned_pages is known right from the start with VM snapshots, it will\nbe relatively straightforward to partition these pages into distinct\nranges with an equal number of pages, one per worker planned. The VM\nsnapshot structure can also be used for I/O prefetching, which will be\nmore important with parallel heap pruning (and with aio).\n\nWorking off of an immutable structure that describes which pages to\nprocess right from the start is naturally easy to work with, in\ngeneral. We can \"reorder work\" flexibly (i.e. process individual\nscanned_pages in any order that is convenient). Another example is\n\"changing our mind\" about advancing relfrozenxid when it turns out\nthat we maybe should have decided to do that at the start of VACUUM\n[1]. Maybe the specific \"changing our mind\" idea will turn out to not\nbe a very useful idea, but it is at least an interesting and thought\nprovoking concept.\n\n[1] https://postgr.es/m/CAH2-WzkQ86yf==mgAF=cQ0qeLRWKX3htLw9Qo+qx3zbwJJkPiQ@mail.gmail.com\n-- \nPeter Geoghegan\n\n\n", "msg_date": "Tue, 8 Nov 2022 09:57:42 -0800", "msg_from": "Peter Geoghegan <pg@bowt.ie>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Nov 8, 2022 at 11:14 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Sat, Nov 5, 2022 at 6:23 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Fri, Nov 4, 2022 at 10:25 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > For parallel heap pruning, multiple workers will insert key-value\n> > > pairs to the radix tree concurrently. The simplest solution would be a\n> > > single lock to protect writes but the performance will not be good.\n> > > Another solution would be that we can divide the tables into multiple\n> > > ranges so that keys derived from TIDs are not conflicted with each\n> > > other and have parallel workers process one or more ranges. 
That way,\n> > > parallel vacuum workers can build *sub-trees* and the leader process\n> > > can merge them. In use cases of lazy vacuum, since the write phase and\n> > > read phase are separated the readers don't need to worry about\n> > > concurrent updates.\n> >\n> > It's a good idea to use ranges for a different reason -- readahead. See commit 56788d2156fc3, which aimed to improve readahead for sequential scans. It might work to use that as a model: Each worker prunes a range of 64 pages, keeping the dead tids in a local array. At the end of the range: lock the tid store, enter the tids into the store, unlock, free the local array, and get the next range from the leader. It's possible contention won't be too bad, and I suspect using small local arrays as-we-go would be faster and use less memory than merging multiple sub-trees at the end.\n>\n> Seems a promising idea. I think it might work well even in the current\n> parallel vacuum (ie., single writer). I mean, I think we can have a\n> single lwlock for shared cases in the first version. If the overhead\n> of acquiring the lwlock per insertion of key-value is not negligible,\n> we might want to try this idea.\n>\n> Apart from that, I'm going to incorporate the comments on 0004 patch\n> and try a pointer tagging.\n\nI'd like to share some progress on this work.\n\n0004 patch is a new patch supporting a pointer tagging of the node\nkind. Also, it introduces rt_node_ptr we discussed so that internal\nfunctions use it rather than having two arguments for encoded and\ndecoded pointers. With this intermediate patch, the DSA support patch\nbecame more readable and understandable. Probably we can make it\nsmaller further if we move the change of separating the control object\nfrom radix_tree to the main patch (0002). The patch still needs to be\npolished but I'd like to check if this idea is worthwhile. 
If we agree\non this direction, this patch will be merged into the main radix tree\nimplementation patch.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 14 Nov 2022 17:43:40 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Nov 14, 2022 at 3:44 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> 0004 patch is a new patch supporting a pointer tagging of the node\n> kind. Also, it introduces rt_node_ptr we discussed so that internal\n> functions use it rather than having two arguments for encoded and\n> decoded pointers. With this intermediate patch, the DSA support patch\n> became more readable and understandable. Probably we can make it\n> smaller further if we move the change of separating the control object\n> from radix_tree to the main patch (0002). The patch still needs to be\n> polished but I'd like to check if this idea is worthwhile. If we agree\n> on this direction, this patch will be merged into the main radix tree\n> implementation patch.\n\nThanks for the new patch set. I've taken a very brief look at 0004 and I\nthink the broad outlines are okay. As you say it needs polish, but before\ngoing further, I'd like to do some experiments of my own as I mentioned\nearlier:\n\n- See how much performance we actually gain from tagging the node kind.\n- Try additional size classes while keeping the node kinds to only four.\n- Optimize node128 insert.\n- Try templating out the differences between local and shared memory. 
With\nlocal memory, the node-pointer struct would be a union, for example.\nTemplating would also reduce branches and re-simplify some internal APIs,\nbut it's likely that would also make the TID store and/or vacuum more\ncomplex, because at least some external functions would be duplicated.\n\nI'll set the patch to \"waiting on author\", but in this case the author is\nme.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Nov 14, 2022 at 3:44 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> 0004 patch is a new patch supporting a pointer tagging of the node> kind. Also, it introduces rt_node_ptr we discussed so that internal> functions use it rather than having two arguments for encoded and> decoded pointers. With this intermediate patch, the DSA support patch> became more readable and understandable. Probably we can make it> smaller further if we move the change of separating the control object> from radix_tree to the main patch (0002). The patch still needs to be> polished but I'd like to check if this idea is worthwhile. If we agree> on this direction, this patch will be merged into the main radix tree> implementation patch.Thanks for the new patch set. I've taken a very brief look at 0004 and I think the broad outlines are okay. As you say it needs polish, but before going further, I'd like to do some experiments of my own as I mentioned earlier:- See how much performance we actually gain from tagging the node kind. - Try additional size classes while keeping the node kinds to only four.- Optimize node128 insert. - Try templating out the differences between local and shared memory. With local memory, the node-pointer struct would be a union, for example. Templating would also reduce branches and re-simplify some internal APIs, but it's likely that would also make the TID store and/or vacuum more complex, because at least some external functions would be duplicated. 
I'll set the patch to \"waiting on author\", but in this case the author is me.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 14 Nov 2022 19:59:51 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Nov 14, 2022 at 10:00 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Nov 14, 2022 at 3:44 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > 0004 patch is a new patch supporting a pointer tagging of the node\n> > kind. Also, it introduces rt_node_ptr we discussed so that internal\n> > functions use it rather than having two arguments for encoded and\n> > decoded pointers. With this intermediate patch, the DSA support patch\n> > became more readable and understandable. Probably we can make it\n> > smaller further if we move the change of separating the control object\n> > from radix_tree to the main patch (0002). The patch still needs to be\n> > polished but I'd like to check if this idea is worthwhile. If we agree\n> > on this direction, this patch will be merged into the main radix tree\n> > implementation patch.\n>\n> Thanks for the new patch set. I've taken a very brief look at 0004 and I think the broad outlines are okay. As you say it needs polish, but before going further, I'd like to do some experiments of my own as I mentioned earlier:\n>\n> - See how much performance we actually gain from tagging the node kind.\n> - Try additional size classes while keeping the node kinds to only four.\n> - Optimize node128 insert.\n> - Try templating out the differences between local and shared memory. With local memory, the node-pointer struct would be a union, for example. 
Templating would also reduce branches and re-simplify some internal APIs, but it's likely that would also make the TID store and/or vacuum more complex, because at least some external functions would be duplicated.\n\nThanks! Please let me know if there is something I can help with.\n\nIn the meanwhile, I'd like to make some progress on the vacuum\nintegration and improving the test coverages.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 15 Nov 2022 13:58:29 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Nov 15, 2022 at 11:59 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> Thanks! Please let me know if there is something I can help with.\n\nI didn't get very far because the tests fail on 0004 in rt_verify_node:\n\nTRAP: failed Assert(\"n4->chunks[i - 1] < n4->chunks[i]\"), File:\n\"../src/backend/lib/radixtree.c\", Line: 2186, PID: 18242\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Tue, Nov 15, 2022 at 11:59 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> Thanks! Please let me know if there is something I can help with.I didn't get very far because the tests fail on 0004 in rt_verify_node:TRAP: failed Assert(\"n4->chunks[i - 1] < n4->chunks[i]\"), File: \"../src/backend/lib/radixtree.c\", Line: 2186, PID: 18242--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 16 Nov 2022 11:46:18 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Nov 16, 2022 at 11:46 AM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n>\n>\n> On Tue, Nov 15, 2022 at 11:59 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> > Thanks! 
Please let me know if there is something I can help with.\n>\n> I didn't get very far because the tests fail on 0004 in rt_verify_node:\n>\n> TRAP: failed Assert(\"n4->chunks[i - 1] < n4->chunks[i]\"), File:\n\"../src/backend/lib/radixtree.c\", Line: 2186, PID: 18242\n\nActually I do want to offer some general advice. Upthread I recommended a\npurely refactoring patch that added the node-pointer struct but did nothing\nelse, so that the DSA changes would be smaller. 0004 attempted pointer\ntagging in the same commit, which makes it no longer a purely refactoring\npatch, so that 1) makes it harder to tell what part caused the bug and 2)\nobscures what is necessary for DSA pointers and what was additionally\nnecessary for pointer tagging. Shared memory support is a prerequisite for\na shippable feature, but pointer tagging is (hopefully) a performance\noptimization. Let's keep them separate.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Nov 16, 2022 at 11:46 AM John Naylor <john.naylor@enterprisedb.com> wrote:>>> On Tue, Nov 15, 2022 at 11:59 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > Thanks! Please let me know if there is something I can help with.>> I didn't get very far because the tests fail on 0004 in rt_verify_node:>> TRAP: failed Assert(\"n4->chunks[i - 1] < n4->chunks[i]\"), File: \"../src/backend/lib/radixtree.c\", Line: 2186, PID: 18242Actually I do want to offer some general advice. Upthread I recommended a purely refactoring patch that added the node-pointer struct but did nothing else, so that the DSA changes would be smaller. 0004 attempted pointer tagging in the same commit, which makes it no longer a purely refactoring patch, so that 1) makes it harder to tell what part caused the bug and 2) obscures what is necessary for DSA pointers and what was additionally necessary for pointer tagging. Shared memory support is a prerequisite for a shippable feature, but pointer tagging is (hopefully) a performance optimization. 
Let's keep them separate.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 16 Nov 2022 12:17:23 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Nov 16, 2022 at 1:46 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Tue, Nov 15, 2022 at 11:59 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > Thanks! Please let me know if there is something I can help with.\n>\n> I didn't get very far because the tests fail on 0004 in rt_verify_node:\n>\n> TRAP: failed Assert(\"n4->chunks[i - 1] < n4->chunks[i]\"), File: \"../src/backend/lib/radixtree.c\", Line: 2186, PID: 18242\n\nWhich tests do you use to get this assertion failure? I've confirmed\nthere is a bug in 0005 patch but without it, \"make check-world\"\npassed.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 16 Nov 2022 14:33:05 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Nov 16, 2022 at 2:17 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n>\n> On Wed, Nov 16, 2022 at 11:46 AM John Naylor <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> > On Tue, Nov 15, 2022 at 11:59 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > Thanks! Please let me know if there is something I can help with.\n> >\n> > I didn't get very far because the tests fail on 0004 in rt_verify_node:\n> >\n> > TRAP: failed Assert(\"n4->chunks[i - 1] < n4->chunks[i]\"), File: \"../src/backend/lib/radixtree.c\", Line: 2186, PID: 18242\n>\n> Actually I do want to offer some general advice. Upthread I recommended a purely refactoring patch that added the node-pointer struct but did nothing else, so that the DSA changes would be smaller. 
0004 attempted pointer tagging in the same commit, which makes it no longer a purely refactoring patch, so that 1) makes it harder to tell what part caused the bug and 2) obscures what is necessary for DSA pointers and what was additionally necessary for pointer tagging. Shared memory support is a prerequisite for a shippable feature, but pointer tagging is (hopefully) a performance optimization. Let's keep them separate.\n\nTotally agreed. I'll separate them in the next version patch. Thank\nyou for your advice.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 16 Nov 2022 14:34:15 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Nov 16, 2022 at 12:33 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Wed, Nov 16, 2022 at 1:46 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> > On Tue, Nov 15, 2022 at 11:59 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> > > Thanks! Please let me know if there is something I can help with.\n> >\n> > I didn't get very far because the tests fail on 0004 in rt_verify_node:\n> >\n> > TRAP: failed Assert(\"n4->chunks[i - 1] < n4->chunks[i]\"), File:\n\"../src/backend/lib/radixtree.c\", Line: 2186, PID: 18242\n>\n> Which tests do you use to get this assertion failure? I've confirmed\n> there is a bug in 0005 patch but without it, \"make check-world\"\n> passed.\n\nHmm, I started over and rebuilt and it didn't reproduce. Not sure what\nhappened, sorry for the noise.\n\nI'm attaching a test I wrote to stress test branch prediction in search,\nand while trying it out I found two possible issues.\n\nIt's based on the random int load test, but tests search speed. 
Run like\nthis:\n\nselect * from bench_search_random_nodes(10 * 1000 * 1000)\n\nIt also takes some care to include all the different node kinds,\nrestricting the possible keys by AND-ing with a filter. Here's a simple\ndemo:\n\nfilter = ((uint64)1<<40)-1;\nLOG: num_keys = 9999967, height = 4, n4 = 17513814, n32 = 6320, n128 =\n62663, n256 = 3130\n\nJust using random integers leads to >99% using the smallest node. I wanted\nto get close to having the same number of each, but that's difficult while\nstill using random inputs. I ended up using\n\nfilter = (((uint64) 0x7F<<32) | (0x07<<24) | (0xFF<<16) | 0xFF)\n\nwhich gives\n\nLOG: num_keys = 9291812, height = 4, n4 = 262144, n32 = 79603, n128 =\n182670, n256 = 1024\n\nWhich seems okay for the task. One puzzling thing I found while trying\nvarious filters is that sometimes the reported tree height would change.\nFor example:\n\nfilter = (((uint64) 1<<32) | (0xFF<<24));\nLOG: num_keys = 9999944, height = 7, n4 = 47515559, n32 = 6209, n128 =\n62632, n256 = 3161\n\n1) Any idea why the tree height would be reported as 7 here? I didn't\nexpect that.\n\n2) It seems that 0004 actually causes a significant slowdown in this test\n(as in the attached, using the second filter above and with turboboost\ndisabled):\n\nv9 0003: 2062 2051 2050\nv9 0004: 2346 2316 2321\n\nThat means my idea for the pointer struct might have some problems, at\nleast as currently implemented. Maybe in the course of separating out and\npolishing that piece, an inefficiency will fall out. Or, it might be\nanother reason to template local and shared separately. Not sure yet. 
I\nalso haven't tried to adjust this test for the shared memory case.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 16 Nov 2022 14:39:00 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Nov 16, 2022 at 4:39 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Wed, Nov 16, 2022 at 12:33 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Nov 16, 2022 at 1:46 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > >\n> > > On Tue, Nov 15, 2022 at 11:59 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > > Thanks! Please let me know if there is something I can help with.\n> > >\n> > > I didn't get very far because the tests fail on 0004 in rt_verify_node:\n> > >\n> > > TRAP: failed Assert(\"n4->chunks[i - 1] < n4->chunks[i]\"), File: \"../src/backend/lib/radixtree.c\", Line: 2186, PID: 18242\n> >\n> > Which tests do you use to get this assertion failure? I've confirmed\n> > there is a bug in 0005 patch but without it, \"make check-world\"\n> > passed.\n>\n> Hmm, I started over and rebuilt and it didn't reproduce. Not sure what happened, sorry for the noise.\n\nGood to know. No problem.\n\n> I'm attaching a test I wrote to stress test branch prediction in search, and while trying it out I found two possible issues.\n\nThank you for testing!\n\n>\n> It's based on the random int load test, but tests search speed. Run like this:\n>\n> select * from bench_search_random_nodes(10 * 1000 * 1000)\n>\n> It also takes some care to include all the different node kinds, restricting the possible keys by AND-ing with a filter. Here's a simple demo:\n>\n> filter = ((uint64)1<<40)-1;\n> LOG: num_keys = 9999967, height = 4, n4 = 17513814, n32 = 6320, n128 = 62663, n256 = 3130\n>\n> Just using random integers leads to >99% using the smallest node. 
I wanted to get close to having the same number of each, but that's difficult while still using random inputs. I ended up using\n>\n> filter = (((uint64) 0x7F<<32) | (0x07<<24) | (0xFF<<16) | 0xFF)\n>\n> which gives\n>\n> LOG: num_keys = 9291812, height = 4, n4 = 262144, n32 = 79603, n128 = 182670, n256 = 1024\n>\n> Which seems okay for the task. One puzzling thing I found while trying various filters is that sometimes the reported tree height would change. For example:\n>\n> filter = (((uint64) 1<<32) | (0xFF<<24));\n> LOG: num_keys = 9999944, height = 7, n4 = 47515559, n32 = 6209, n128 = 62632, n256 = 3161\n>\n> 1) Any idea why the tree height would be reported as 7 here? I didn't expect that.\n\nIn my environment, (0xFF<<24) is 0xFFFFFFFFFF000000, not 0xFF000000.\nIt seems the filter should be (((uint64) 1<<32) | ((uint64)\n0xFF<<24)).\n\n>\n> 2) It seems that 0004 actually causes a significant slowdown in this test (as in the attached, using the second filter above and with turboboost disabled):\n>\n> v9 0003: 2062 2051 2050\n> v9 0004: 2346 2316 2321\n>\n> That means my idea for the pointer struct might have some problems, at least as currently implemented. Maybe in the course of separating out and polishing that piece, an inefficiency will fall out. Or, it might be another reason to template local and shared separately. Not sure yet. I also haven't tried to adjust this test for the shared memory case.\n\nI'll also run the test on my environment and do the investigation tomorrow.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 17 Nov 2022 00:24:05 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Sep 28, 2022 at 1:18 PM I wrote:\n\n> Along those lines, one thing I've been thinking about is the number of\nsize classes. 
There is a tradeoff between memory efficiency and number of\nbranches when searching/inserting. My current thinking is there is too much\ncoupling between size class and data type. Each size class currently uses a\ndifferent data type and a different algorithm to search and set it, which\nin turn requires another branch. We've found that a larger number of size\nclasses leads to poor branch prediction [1] and (I imagine) code density.\n>\n> I'm thinking we can use \"flexible array members\" for the values/pointers,\nand keep the rest of the control data in the struct the same. That way, we\nnever have more than 4 actual \"kinds\" to code and branch on. As a bonus,\nwhen migrating a node to a larger size class of the same kind, we can\nsimply repalloc() to the next size.\n\nWhile the most important challenge right now is how to best represent and\norganize the shared memory case, I wanted to get the above idea working and\nout of the way, to be saved for a future time. I've attached a rough\nimplementation (applies on top of v9 0003) that splits node32 into 2 size\nclasses. They both share the exact same base data type and hence the same\nsearch/set code, so the number of \"kind\"s is still four, but here there are\nfive \"size classes\", so a new case in the \"unlikely\" node-growing path. The\nsmaller instance of node32 is a \"node15\", because that's currently 160\nbytes, corresponding to one of the DSA size classes. This idea can be\napplied to any other node except the max size, as we see fit. (Adding a\nsingleton size class would bring it back in line with the prototype, at\nleast as far as memory consumption.)\n\nOne issue with this patch: The \"fanout\" member is a uint8, so it can't hold\n256 for the largest node kind. That's not an issue in practice, since we\nnever need to grow it, and we only compare that value with the count in an\nAssert(), so I just set it to zero. That does break an invariant, so it's\nnot great. 
We could use 2 bytes to be strictly correct in all cases, but\nthat limits what we can do with the smallest node kind.\n\nIn the course of working on this, I encountered a pain point. Since it's\nimpossible to repalloc in slab, we have to do alloc/copy/free ourselves.\nThat's fine, but the current coding makes too many assumptions about the\nuse cases: rt_alloc_node and rt_copy_node are too entangled with each other\nand do too much work unrelated to what the names imply. I seem to remember\nan earlier version had something like rt_node_copy_common that did\nonly...copying. That was much easier to reason about. In 0002 I resorted to\ndoing my own allocation to show what I really want to do, because the new\nuse case doesn't need zeroing and setting values. It only needs\nto...allocate (and increase the stats counter if built that way).\n\nFuture optimization work while I'm thinking of it: rt_alloc_node should be\nalways-inlined and the memset done separately (i.e. not *AllocZero). That\nway the compiler should be able generate more efficient zeroing code for\nsmaller nodes. I'll test the numbers on this sometime in the future.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 18 Nov 2022 14:48:49 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Nov 17, 2022 at 12:24 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Wed, Nov 16, 2022 at 4:39 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> > On Wed, Nov 16, 2022 at 12:33 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Wed, Nov 16, 2022 at 1:46 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> > > >\n> > > >\n> > > > On Tue, Nov 15, 2022 at 11:59 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > > > Thanks! 
Please let me know if there is something I can help with.\n> > > >\n> > > > I didn't get very far because the tests fail on 0004 in rt_verify_node:\n> > > >\n> > > > TRAP: failed Assert(\"n4->chunks[i - 1] < n4->chunks[i]\"), File: \"../src/backend/lib/radixtree.c\", Line: 2186, PID: 18242\n> > >\n> > > Which tests do you use to get this assertion failure? I've confirmed\n> > > there is a bug in 0005 patch but without it, \"make check-world\"\n> > > passed.\n> >\n> > Hmm, I started over and rebuilt and it didn't reproduce. Not sure what happened, sorry for the noise.\n>\n> Good to know. No problem.\n>\n> > I'm attaching a test I wrote to stress test branch prediction in search, and while trying it out I found two possible issues.\n>\n> Thank you for testing!\n>\n> >\n> > It's based on the random int load test, but tests search speed. Run like this:\n> >\n> > select * from bench_search_random_nodes(10 * 1000 * 1000)\n> >\n> > It also takes some care to include all the different node kinds, restricting the possible keys by AND-ing with a filter. Here's a simple demo:\n> >\n> > filter = ((uint64)1<<40)-1;\n> > LOG: num_keys = 9999967, height = 4, n4 = 17513814, n32 = 6320, n128 = 62663, n256 = 3130\n> >\n> > Just using random integers leads to >99% using the smallest node. I wanted to get close to having the same number of each, but that's difficult while still using random inputs. I ended up using\n> >\n> > filter = (((uint64) 0x7F<<32) | (0x07<<24) | (0xFF<<16) | 0xFF)\n> >\n> > which gives\n> >\n> > LOG: num_keys = 9291812, height = 4, n4 = 262144, n32 = 79603, n128 = 182670, n256 = 1024\n> >\n> > Which seems okay for the task. One puzzling thing I found while trying various filters is that sometimes the reported tree height would change. For example:\n> >\n> > filter = (((uint64) 1<<32) | (0xFF<<24));\n> > LOG: num_keys = 9999944, height = 7, n4 = 47515559, n32 = 6209, n128 = 62632, n256 = 3161\n> >\n> > 1) Any idea why the tree height would be reported as 7 here? 
I didn't expect that.\n>\n> In my environment, (0xFF<<24) is 0xFFFFFFFFFF000000, not 0xFF000000.\n> It seems the filter should be (((uint64) 1<<32) | ((uint64)\n> 0xFF<<24)).\n>\n> >\n> > 2) It seems that 0004 actually causes a significant slowdown in this test (as in the attached, using the second filter above and with turboboost disabled):\n> >\n> > v9 0003: 2062 2051 2050\n> > v9 0004: 2346 2316 2321\n> >\n> > That means my idea for the pointer struct might have some problems, at least as currently implemented. Maybe in the course of separating out and polishing that piece, an inefficiency will fall out. Or, it might be another reason to template local and shared separately. Not sure yet. I also haven't tried to adjust this test for the shared memory case.\n>\n> I'll also run the test on my environment and do the investigation tomorrow.\n>\n\nFYI I've not tested the patch you shared today but here are the\nbenchmark results I did with the v9 patch in my environment (I used\nthe second filter). I splitted 0004 patch into two patches: a patch\nfor pure refactoring patch to introduce rt_node_ptr and a patch to do\npointer tagging.\n\nv9 0003 patch : 1113 1114 1114\nintroduce rt_node_ptr: 1127 1128 1128\npointer tagging : 1085 1087 1086 (equivalent to 0004 patch)\n\nIn my environment, rt_node_ptr seemed to lead some overhead but\npointer tagging had performance benefits. I'm not sure the reason why\nthe results are different from yours. 
The radix tree stats shows the\nsame as your tests.\n\n=# select * from bench_search_random_nodes(10 * 1000 * 1000);\n2022-11-18 22:18:21.608 JST [3913544] LOG: num_keys = 9291812, height\n= 4, n4 = 262144, n32 =79603, n128 = 182670, n256 = 1024\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 18 Nov 2022 22:20:10 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Nov 18, 2022 at 8:20 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> FYI I've not tested the patch you shared today but here are the\n> benchmark results I did with the v9 patch in my environment (I used\n> the second filter). I splitted 0004 patch into two patches: a patch\n> for pure refactoring patch to introduce rt_node_ptr and a patch to do\n> pointer tagging.\n>\n> v9 0003 patch : 1113 1114 1114\n> introduce rt_node_ptr: 1127 1128 1128\n> pointer tagging : 1085 1087 1086 (equivalent to 0004 patch)\n>\n> In my environment, rt_node_ptr seemed to lead some overhead but\n> pointer tagging had performance benefits. I'm not sure the reason why\n> the results are different from yours. The radix tree stats shows the\n> same as your tests.\n\nThere is less than 2% difference from the medial set of results, so it's\nhard to distinguish from noise. I did a fresh rebuild and retested with the\nsame results: about 15% slowdown in v9 0004. That's strange.\n\nOn Wed, Nov 16, 2022 at 10:24 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> > filter = (((uint64) 1<<32) | (0xFF<<24));\n> > LOG: num_keys = 9999944, height = 7, n4 = 47515559, n32 = 6209, n128 =\n62632, n256 = 3161\n> >\n> > 1) Any idea why the tree height would be reported as 7 here? 
I didn't\nexpect that.\n>\n> In my environment, (0xFF<<24) is 0xFFFFFFFFFF000000, not 0xFF000000.\n> It seems the filter should be (((uint64) 1<<32) | ((uint64)\n> 0xFF<<24)).\n\nUgh, sign extension, brain fade on my part. Thanks, I'm glad there was a\nstraightforward explanation.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Fri, Nov 18, 2022 at 8:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> FYI I've not tested the patch you shared today but here are the> benchmark results I did with the v9 patch in my environment (I used> the second filter). I splitted 0004 patch into two patches: a patch> for pure refactoring patch to introduce rt_node_ptr and a patch to do> pointer tagging.>> v9 0003 patch            : 1113 1114 1114> introduce rt_node_ptr: 1127 1128 1128> pointer tagging          : 1085 1087 1086 (equivalent to 0004 patch)>> In my environment, rt_node_ptr seemed to lead some overhead but> pointer tagging had performance benefits. I'm not sure the reason why> the results are different from yours. The radix tree stats shows the> same as your tests.There is less than 2% difference from the medial set of results, so it's hard to distinguish from noise. I did a fresh rebuild and retested with the same results: about 15% slowdown in v9 0004. That's strange.On Wed, Nov 16, 2022 at 10:24 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > filter = (((uint64) 1<<32) | (0xFF<<24));> > LOG:  num_keys = 9999944, height = 7, n4 = 47515559, n32 = 6209, n128 = 62632, n256 = 3161> >> > 1) Any idea why the tree height would be reported as 7 here? I didn't expect that.>> In my environment, (0xFF<<24) is 0xFFFFFFFFFF000000, not 0xFF000000.> It seems the filter should be (((uint64) 1<<32) | ((uint64)> 0xFF<<24)).Ugh, sign extension, brain fade on my part. 
Thanks, I'm glad there was a straightforward explanation.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Sat, 19 Nov 2022 13:05:30 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Nov 18, 2022 at 8:20 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Thu, Nov 17, 2022 at 12:24 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> >\n> > On Wed, Nov 16, 2022 at 4:39 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n\n> > > That means my idea for the pointer struct might have some problems,\nat least as currently implemented. Maybe in the course of separating out\nand polishing that piece, an inefficiency will fall out. Or, it might be\nanother reason to template local and shared separately. Not sure yet. I\nalso haven't tried to adjust this test for the shared memory case.\n\nDigging a bit deeper, I see a flaw in my benchmark: Even though the total\ndistribution of node kinds is decently even, the pattern that the benchmark\nsees is not terribly random:\n\n 3,343,352 branch-misses:u # 0.85% of all\nbranches\n 393,204,959 branches:u\n\nRecall a previous benchmark [1] where the leaf node was about half node16\nand half node32. Randomizing the leaf node between the two caused branch\nmisses to go from 1% to 2%, causing a noticeable slowdown. Maybe in this\nnew benchmark, each level has a skewed distribution of nodes, giving a\nsmart branch predictor something to work with. We will need a way to\nefficiently generate keys that lead to a relatively unpredictable\ndistribution of node kinds, as seen by a searcher. 
Especially in the leaves\n(or just above the leaves), since those are less likely to be cached.\n\n> > I'll also run the test on my environment and do the investigation\ntomorrow.\n> >\n>\n> FYI I've not tested the patch you shared today but here are the\n> benchmark results I did with the v9 patch in my environment (I used\n> the second filter). I splitted 0004 patch into two patches: a patch\n> for pure refactoring patch to introduce rt_node_ptr and a patch to do\n> pointer tagging.\n\nWould you be able to share the refactoring patch? And a fix for the failing\ntests? I'm thinking I want to try the templating approach fairly soon.\n\n[1]\nhttps://www.postgresql.org/message-id/CAFBsxsFEVckVzsBsfgGzGR4Yz%3DJp%3DUxOtjYvTjOz6fOoLXtOig%40mail.gmail.com\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Fri, Nov 18, 2022 at 8:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Thu, Nov 17, 2022 at 12:24 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> >> > On Wed, Nov 16, 2022 at 4:39 PM John Naylor> > <john.naylor@enterprisedb.com> wrote:> > > That means my idea for the pointer struct might have some problems, at least as currently implemented. Maybe in the course of separating out and polishing that piece, an inefficiency will fall out. Or, it might be another reason to template local and shared separately. Not sure yet. I also haven't tried to adjust this test for the shared memory case.Digging a bit deeper, I see a flaw in my benchmark: Even though the total distribution of node kinds is decently even, the pattern that the benchmark sees is not terribly random:         3,343,352      branch-misses:u                  #    0.85% of all branches               393,204,959      branches:u Recall a previous benchmark [1] where the leaf node was about half node16 and half node32. Randomizing the leaf node between the two caused branch misses to go from 1% to 2%, causing a noticeable slowdown. 
Maybe in this new benchmark, each level has a skewed distribution of nodes, giving a smart branch predictor something to work with. We will need a way to efficiently generate keys that lead to a relatively unpredictable distribution of node kinds, as seen by a searcher. Especially in the leaves (or just above the leaves), since those are less likely to be cached.> > I'll also run the test on my environment and do the investigation tomorrow.> >>> FYI I've not tested the patch you shared today but here are the> benchmark results I did with the v9 patch in my environment (I used> the second filter). I splitted 0004 patch into two patches: a patch> for pure refactoring patch to introduce rt_node_ptr and a patch to do> pointer tagging.Would you be able to share the refactoring patch? And a fix for the failing tests? I'm thinking I want to try the templating approach fairly soon.[1] https://www.postgresql.org/message-id/CAFBsxsFEVckVzsBsfgGzGR4Yz%3DJp%3DUxOtjYvTjOz6fOoLXtOig%40mail.gmail.com--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 21 Nov 2022 13:43:09 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Nov 18, 2022 at 2:48 PM I wrote:\n> One issue with this patch: The \"fanout\" member is a uint8, so it can't\nhold 256 for the largest node kind. That's not an issue in practice, since\nwe never need to grow it, and we only compare that value with the count in\nan Assert(), so I just set it to zero. That does break an invariant, so\nit's not great. 
We could use 2 bytes to be strictly correct in all cases,\nbut that limits what we can do with the smallest node kind.\n\nThinking about this part, there's an easy resolution -- use a different\nmacro for fixed- and variable-sized node kinds to determine if there is a\nfree slot.\n\nAlso, I wanted to share some results of adjusting the boundary between the\ntwo smallest node kinds. In the hackish attached patch, I modified the\nfixed height search benchmark to search a small (within L1 cache) tree\nthousands of times. For the first set I modified node4's maximum fanout and\nfilled it up. For the second, I set node4's fanout to 1, which causes 2+ to\nspill to node32 (actually the partially-filled node15 size class\nas demoed earlier).\n\nnode4:\n\nNOTICE: num_keys = 16, height = 3, n4 = 15, n15 = 0, n32 = 0, n128 = 0,\nn256 = 0\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+-------+------------------+------------+--------------\n 2 | 16 | 16520 | 0 | 3\n\nNOTICE: num_keys = 81, height = 3, n4 = 40, n15 = 0, n32 = 0, n128 = 0,\nn256 = 0\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+-------+------------------+------------+--------------\n 3 | 81 | 16456 | 0 | 17\n\nNOTICE: num_keys = 256, height = 3, n4 = 85, n15 = 0, n32 = 0, n128 = 0,\nn256 = 0\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+-------+------------------+------------+--------------\n 4 | 256 | 16456 | 0 | 89\n\nNOTICE: num_keys = 625, height = 3, n4 = 156, n15 = 0, n32 = 0, n128 = 0,\nn256 = 0\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+-------+------------------+------------+--------------\n 5 | 625 | 16488 | 0 | 327\n\n\nnode32:\n\nNOTICE: num_keys = 16, height = 3, n4 = 0, n15 = 15, n32 = 0, n128 = 0,\nn256 = 0\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+-------+------------------+------------+--------------\n 2 | 16 | 16488 | 0 | 5\n(1 row)\n\nNOTICE: num_keys 
= 81, height = 3, n4 = 0, n15 = 40, n32 = 0, n128 = 0,\nn256 = 0\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+-------+------------------+------------+--------------\n 3 | 81 | 16520 | 0 | 28\n\nNOTICE: num_keys = 256, height = 3, n4 = 0, n15 = 85, n32 = 0, n128 = 0,\nn256 = 0\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+-------+------------------+------------+--------------\n 4 | 256 | 16408 | 0 | 79\n\nNOTICE: num_keys = 625, height = 3, n4 = 0, n15 = 156, n32 = 0, n128 = 0,\nn256 = 0\n fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n--------+-------+------------------+------------+--------------\n 5 | 625 | 24616 | 0 | 199\n\nIn this test, node32 seems slightly faster than node4 with 4 elements, at\nthe cost of more memory.\n\nAssuming the smallest node is fixed size (i.e. fanout/capacity member not\npart of the common set, so only part of variable-sized nodes), 3 has a nice\nproperty: no wasted padding space:\n\nnode4: 5 + 4+(7) + 4*8 = 48 bytes\nnode3: 5 + 3 + 3*8 = 32\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Fri, Nov 18, 2022 at 2:48 PM I wrote:> One issue with this patch: The \"fanout\" member is a uint8, so it can't hold 256 for the largest node kind. That's not an issue in practice, since we never need to grow it, and we only compare that value with the count in an Assert(), so I just set it to zero. That does break an invariant, so it's not great. We could use 2 bytes to be strictly correct in all cases, but that limits what we can do with the smallest node kind.Thinking about this part, there's an easy resolution -- use a different macro for fixed- and variable-sized node kinds to determine if there is a free slot.Also, I wanted to share some results of adjusting the boundary between the two smallest node kinds. In the hackish attached patch, I modified the fixed height search benchmark to search a small (within L1 cache) tree thousands of times. 
For the first set I modified node4's maximum fanout and filled it up. For the second, I set node4's fanout to 1, which causes 2+ to spill to node32 (actually the partially-filled node15 size class as demoed earlier).node4:NOTICE:  num_keys = 16, height = 3, n4 = 15, n15 = 0, n32 = 0, n128 = 0, n256 = 0 fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms --------+-------+------------------+------------+--------------      2 |    16 |            16520 |          0 |            3NOTICE:  num_keys = 81, height = 3, n4 = 40, n15 = 0, n32 = 0, n128 = 0, n256 = 0 fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms --------+-------+------------------+------------+--------------      3 |    81 |            16456 |          0 |           17NOTICE:  num_keys = 256, height = 3, n4 = 85, n15 = 0, n32 = 0, n128 = 0, n256 = 0 fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms --------+-------+------------------+------------+--------------      4 |   256 |            16456 |          0 |           89NOTICE:  num_keys = 625, height = 3, n4 = 156, n15 = 0, n32 = 0, n128 = 0, n256 = 0 fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms --------+-------+------------------+------------+--------------      5 |   625 |            16488 |          0 |          327node32:NOTICE:  num_keys = 16, height = 3, n4 = 0, n15 = 15, n32 = 0, n128 = 0, n256 = 0 fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms --------+-------+------------------+------------+--------------      2 |    16 |            16488 |          0 |            5(1 row)NOTICE:  num_keys = 81, height = 3, n4 = 0, n15 = 40, n32 = 0, n128 = 0, n256 = 0 fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms --------+-------+------------------+------------+--------------      3 |    81 |            16520 |          0 |           28NOTICE:  num_keys = 256, height = 3, n4 = 0, n15 = 85, n32 = 0, n128 = 0, n256 = 0 fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms 
--------+-------+------------------+------------+--------------      4 |   256 |            16408 |          0 |           79NOTICE:  num_keys = 625, height = 3, n4 = 0, n15 = 156, n32 = 0, n128 = 0, n256 = 0 fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms --------+-------+------------------+------------+--------------      5 |   625 |            24616 |          0 |          199In this test, node32 seems slightly faster than node4 with 4 elements, at the cost of more memory. Assuming the smallest node is fixed size (i.e. fanout/capacity member not part of the common set, so only part of variable-sized nodes), 3 has a nice property: no wasted padding space:node4: 5 + 4+(7) + 4*8 = 48 bytesnode3: 5 + 3     + 3*8 = 32--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 21 Nov 2022 14:20:03 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Nov 21, 2022 at 3:43 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Fri, Nov 18, 2022 at 8:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Nov 17, 2022 at 12:24 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Wed, Nov 16, 2022 at 4:39 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n>\n> > > > That means my idea for the pointer struct might have some problems, at least as currently implemented. Maybe in the course of separating out and polishing that piece, an inefficiency will fall out. Or, it might be another reason to template local and shared separately. Not sure yet. 
I also haven't tried to adjust this test for the shared memory case.\n>\n> Digging a bit deeper, I see a flaw in my benchmark: Even though the total distribution of node kinds is decently even, the pattern that the benchmark sees is not terribly random:\n>\n> 3,343,352 branch-misses:u # 0.85% of all branches\n> 393,204,959 branches:u\n>\n> Recall a previous benchmark [1] where the leaf node was about half node16 and half node32. Randomizing the leaf node between the two caused branch misses to go from 1% to 2%, causing a noticeable slowdown. Maybe in this new benchmark, each level has a skewed distribution of nodes, giving a smart branch predictor something to work with. We will need a way to efficiently generate keys that lead to a relatively unpredictable distribution of node kinds, as seen by a searcher. Especially in the leaves (or just above the leaves), since those are less likely to be cached.\n>\n> > > I'll also run the test on my environment and do the investigation tomorrow.\n> > >\n> >\n> > FYI I've not tested the patch you shared today but here are the\n> > benchmark results I did with the v9 patch in my environment (I used\n> > the second filter). I splitted 0004 patch into two patches: a patch\n> > for pure refactoring patch to introduce rt_node_ptr and a patch to do\n> > pointer tagging.\n>\n> Would you be able to share the refactoring patch? And a fix for the failing tests? I'm thinking I want to try the templating approach fairly soon.\n>\n\nSure. I've attached the v10 patches. 
0004 is the pure refactoring\npatch and 0005 patch introduces the pointer tagging.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 21 Nov 2022 17:06:56 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Nov 21, 2022 at 4:20 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Fri, Nov 18, 2022 at 2:48 PM I wrote:\n> > One issue with this patch: The \"fanout\" member is a uint8, so it can't hold 256 for the largest node kind. That's not an issue in practice, since we never need to grow it, and we only compare that value with the count in an Assert(), so I just set it to zero. That does break an invariant, so it's not great. We could use 2 bytes to be strictly correct in all cases, but that limits what we can do with the smallest node kind.\n>\n> Thinking about this part, there's an easy resolution -- use a different macro for fixed- and variable-sized node kinds to determine if there is a free slot.\n>\n> Also, I wanted to share some results of adjusting the boundary between the two smallest node kinds. In the hackish attached patch, I modified the fixed height search benchmark to search a small (within L1 cache) tree thousands of times. For the first set I modified node4's maximum fanout and filled it up. 
For the second, I set node4's fanout to 1, which causes 2+ to spill to node32 (actually the partially-filled node15 size class as demoed earlier).\n>\n> node4:\n>\n> NOTICE: num_keys = 16, height = 3, n4 = 15, n15 = 0, n32 = 0, n128 = 0, n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n> --------+-------+------------------+------------+--------------\n> 2 | 16 | 16520 | 0 | 3\n>\n> NOTICE: num_keys = 81, height = 3, n4 = 40, n15 = 0, n32 = 0, n128 = 0, n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n> --------+-------+------------------+------------+--------------\n> 3 | 81 | 16456 | 0 | 17\n>\n> NOTICE: num_keys = 256, height = 3, n4 = 85, n15 = 0, n32 = 0, n128 = 0, n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n> --------+-------+------------------+------------+--------------\n> 4 | 256 | 16456 | 0 | 89\n>\n> NOTICE: num_keys = 625, height = 3, n4 = 156, n15 = 0, n32 = 0, n128 = 0, n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n> --------+-------+------------------+------------+--------------\n> 5 | 625 | 16488 | 0 | 327\n>\n>\n> node32:\n>\n> NOTICE: num_keys = 16, height = 3, n4 = 0, n15 = 15, n32 = 0, n128 = 0, n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n> --------+-------+------------------+------------+--------------\n> 2 | 16 | 16488 | 0 | 5\n> (1 row)\n>\n> NOTICE: num_keys = 81, height = 3, n4 = 0, n15 = 40, n32 = 0, n128 = 0, n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n> --------+-------+------------------+------------+--------------\n> 3 | 81 | 16520 | 0 | 28\n>\n> NOTICE: num_keys = 256, height = 3, n4 = 0, n15 = 85, n32 = 0, n128 = 0, n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n> --------+-------+------------------+------------+--------------\n> 4 | 256 | 16408 | 0 | 79\n>\n> NOTICE: num_keys = 625, height = 3, n4 = 0, n15 = 156, n32 = 0, n128 = 0, 
n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_load_ms | rt_search_ms\n> --------+-------+------------------+------------+--------------\n> 5 | 625 | 24616 | 0 | 199\n>\n> In this test, node32 seems slightly faster than node4 with 4 elements, at the cost of more memory.\n>\n> Assuming the smallest node is fixed size (i.e. fanout/capacity member not part of the common set, so only part of variable-sized nodes), 3 has a nice property: no wasted padding space:\n>\n> node4: 5 + 4+(7) + 4*8 = 48 bytes\n> node3: 5 + 3 + 3*8 = 32\n\nIIUC if we store the fanout member only in variable-sized nodes,\nrt_node has only count, shift, and chunk, so 4 bytes in total. If so,\nthe size of node3 (ie. fixed-sized node) is (4 + 3 + (1) + 3*8)? The\nsize doesn't change but there is 1 byte padding space.\n\nAlso, even if we have the node3 a variable-sized node, size class 1\nfor node3 could be a good choice since it also doesn't need padding\nspace and could be a good alternative to path compression.\n\nnode3 : 5 + 3 + 3*8 = 32 bytes\nsize class 1 : 5 + 3 + 1*8 = 16 bytes\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 21 Nov 2022 17:42:34 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Nov 21, 2022 at 3:43 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Mon, Nov 21, 2022 at 4:20 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> > Assuming the smallest node is fixed size (i.e. fanout/capacity member\nnot part of the common set, so only part of variable-sized nodes), 3 has a\nnice property: no wasted padding space:\n> >\n> > node4: 5 + 4+(7) + 4*8 = 48 bytes\n> > node3: 5 + 3 + 3*8 = 32\n>\n> IIUC if we store the fanout member only in variable-sized nodes,\n> rt_node has only count, shift, and chunk, so 4 bytes in total. If so,\n> the size of node3 (ie. 
fixed-sized node) is (4 + 3 + (1) + 3*8)? The\n> size doesn't change but there is 1 byte padding space.\n\nI forgot to mention I'm assuming no pointer-tagging for this exercise.\nYou've demonstrated it can be done in a small amount of code, and I hope we\ncan demonstrate a speedup in search. Just in case there is some issue with\nportability, valgrind, or some other obstacle, I'm being pessimistic in my\ncalculations.\n\n> Also, even if we have the node3 a variable-sized node, size class 1\n> for node3 could be a good choice since it also doesn't need padding\n> space and could be a good alternative to path compression.\n>\n> node3 : 5 + 3 + 3*8 = 32 bytes\n> size class 1 : 5 + 3 + 1*8 = 16 bytes\n\nPrecisely! I have that scenario in my notes as well -- it's quite\ncompelling.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Nov 21, 2022 at 3:43 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Mon, Nov 21, 2022 at 4:20 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> > Assuming the smallest node is fixed size (i.e. fanout/capacity member not part of the common set, so only part of variable-sized nodes), 3 has a nice property: no wasted padding space:> >> > node4: 5 + 4+(7) + 4*8 = 48 bytes> > node3: 5 + 3     + 3*8 = 32>> IIUC if we store the fanout member only in variable-sized nodes,> rt_node has only count, shift, and chunk, so 4 bytes in total. If so,> the size of node3 (ie. fixed-sized node) is (4 + 3 + (1) + 3*8)? The> size doesn't change but there is 1 byte padding space.I forgot to mention I'm assuming no pointer-tagging for this exercise. You've demonstrated it can be done in a small amount of code, and I hope we can demonstrate a speedup in search. 
Just in case there is some issue with portability, valgrind, or some other obstacle, I'm being pessimistic in my calculations.> Also, even if we have the node3 a variable-sized node, size class 1> for node3 could be a good choice since it also doesn't need padding> space and could be a good alternative to path compression.>> node3         :  5 + 3 + 3*8 = 32 bytes> size class 1 : 5 + 3 + 1*8 = 16 bytesPrecisely! I have that scenario in my notes as well -- it's quite compelling.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 21 Nov 2022 16:30:29 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On 2022-11-21 17:06:56 +0900, Masahiko Sawada wrote:\n> Sure. I've attached the v10 patches. 0004 is the pure refactoring\n> patch and 0005 patch introduces the pointer tagging.\n\nThis failed on cfbot, with som many crashes that the VM ran out of disk for\ncore dumps. During testing with 32bit, so there's probably something broken\naround that.\n\nhttps://cirrus-ci.com/task/4635135954386944\n\nA failure is e.g. at: https://api.cirrus-ci.com/v1/artifact/task/4635135954386944/testrun/build-32/testrun/adminpack/regress/log/initdb.log\n\nperforming post-bootstrap initialization ... 
../src/backend/lib/radixtree.c:1696:21: runtime error: member access within misaligned address 0x590faf74 for type 'struct radix_tree_control', which requires 8 byte alignment\n0x590faf74: note: pointer points here\n 90 11 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\n ^\n==55813==Using libbacktrace symbolizer.\n #0 0x56dcc274 in rt_create ../src/backend/lib/radixtree.c:1696\n #1 0x56953d1b in tidstore_create ../src/backend/access/common/tidstore.c:57\n #2 0x56a1ca4f in dead_items_alloc ../src/backend/access/heap/vacuumlazy.c:3109\n #3 0x56a2219f in heap_vacuum_rel ../src/backend/access/heap/vacuumlazy.c:539\n #4 0x56cb77ed in table_relation_vacuum ../src/include/access/tableam.h:1681\n #5 0x56cb77ed in vacuum_rel ../src/backend/commands/vacuum.c:2062\n #6 0x56cb9a16 in vacuum ../src/backend/commands/vacuum.c:472\n #7 0x56cba904 in ExecVacuum ../src/backend/commands/vacuum.c:272\n #8 0x5711b6d0 in standard_ProcessUtility ../src/backend/tcop/utility.c:866\n #9 0x5711bdeb in ProcessUtility ../src/backend/tcop/utility.c:530\n #10 0x5711759f in PortalRunUtility ../src/backend/tcop/pquery.c:1158\n #11 0x57117cb8 in PortalRunMulti ../src/backend/tcop/pquery.c:1315\n #12 0x571183d2 in PortalRun ../src/backend/tcop/pquery.c:791\n #13 0x57111049 in exec_simple_query ../src/backend/tcop/postgres.c:1238\n #14 0x57113f9c in PostgresMain ../src/backend/tcop/postgres.c:4551\n #15 0x5711463d in PostgresSingleUserMain ../src/backend/tcop/postgres.c:4028\n #16 0x56df4672 in main ../src/backend/main/main.c:197\n #17 0xf6ad8e45 in __libc_start_main (/lib/i386-linux-gnu/libc.so.6+0x1ae45)\n #18 0x5691d0f0 in _start (/tmp/cirrus-ci-build/build-32/tmp_install/usr/local/pgsql/bin/postgres+0x3040f0)\n\nAborted (core dumped)\nchild process exited with exit code 134\ninitdb: data directory \"/tmp/cirrus-ci-build/build-32/testrun/adminpack/regress/tmp_check/data\" not removed at user's request\n\n\n", "msg_date": "Tue, 22 Nov 2022 09:10:00 -0800", 
"msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Nov 21, 2022 at 6:30 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Nov 21, 2022 at 3:43 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Nov 21, 2022 at 4:20 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> > > Assuming the smallest node is fixed size (i.e. fanout/capacity member not part of the common set, so only part of variable-sized nodes), 3 has a nice property: no wasted padding space:\n> > >\n> > > node4: 5 + 4+(7) + 4*8 = 48 bytes\n> > > node3: 5 + 3 + 3*8 = 32\n> >\n> > IIUC if we store the fanout member only in variable-sized nodes,\n> > rt_node has only count, shift, and chunk, so 4 bytes in total. If so,\n> > the size of node3 (ie. fixed-sized node) is (4 + 3 + (1) + 3*8)? The\n> > size doesn't change but there is 1 byte padding space.\n>\n> I forgot to mention I'm assuming no pointer-tagging for this exercise. You've demonstrated it can be done in a small amount of code, and I hope we can demonstrate a speedup in search. Just in case there is some issue with portability, valgrind, or some other obstacle, I'm being pessimistic in my calculations.\n>\n> > Also, even if we have the node3 a variable-sized node, size class 1\n> > for node3 could be a good choice since it also doesn't need padding\n> > space and could be a good alternative to path compression.\n> >\n> > node3 : 5 + 3 + 3*8 = 32 bytes\n> > size class 1 : 5 + 3 + 1*8 = 16 bytes\n>\n> Precisely! 
I have that scenario in my notes as well -- it's quite compelling.\n\nSo it seems that there are two candidates of rt_node structure: (1)\nall nodes except for node256 are variable-size nodes and use pointer\ntagging, and (2) node32 and node128 are variable-sized nodes and do\nnot use pointer tagging (fanout is in part of only these two nodes).\nrt_node can be 5 bytes in both cases. But before going to this step, I\nstarted to verify the idea of variable-size nodes by using 6-bytes\nrt_node. We can adjust the node kinds and node classes later.\n\nIn this verification, I have all nodes except for node256\nvariable-sized nodes, and the sizes are:\n\nradix tree node 1 : 6 + 4 + (6) + 1*8 = 24 bytes\nradix tree node 4 : 6 + 4 + (6) + 4*8 = 48\nradix tree node 15 : 6 + 32 + (2) + 15*8 = 160\nradix tree node 32 : 6 + 32 + (2) + 32*8 = 296\nradix tree node 61 : inner 6 + 256 + (2) + 61*8 = 752, leaf 6 +\n256 + (2) + 16 + 61*8 = 768\nradix tree node 128 : inner 6 + 256 + (2) + 128*8 = 1288, leaf 6 +\n256 + (2) + 16 + 128*8 = 1304\nradix tree node 256 : inner 6 + (2) + 256*8 = 2056, leaf 6 + (2) + 32\n+ 256*8 = 2088\n\nI did some performance tests against two radix trees: a radix tree\nsupporting only fixed-size nodes (i.e. applying up to 0003 patch), and\na radix tree supporting variable-size nodes (i.e. applying all\nattached patches). 
Also, I changed bench_search_random_nodes()\nfunction so that we can specify the filter via a function argument.\nHere are results:\n\nHere are results:\n\n* Query\nselect * from bench_seq_search(0, 1*1000*1000, false)\n\n* Fixed-size\nNOTICE: num_keys = 1000000, height = 2, n4 = 0, n32 = 31251, n128 =\n1, n256 = 122\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 9871216 | | 67 |\n | 212 |\n(1 row)\n\n* Variable-size\nNOTICE: num_keys = 1000000, height = 2, n1 = 0, n4 = 0, n15 = 0, n32\n= 31251, n61 = 0, n128 = 1, n256 = 122\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n---------+------------------+---------------------+------------+---------------+--------------+-----------------\n 1000000 | 9871280 | | 74 |\n | 212 |\n(1 row)\n\n---\n* Query\nselect * from bench_seq_search(0, 2*1000*1000, true)\nNOTICE: num_keys = 999654, height = 2, n4 = 1, n32 = 62499, n128 = 1,\nn256 = 245\n* Fixed-size\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 19680848 | | 74 |\n | 201 |\n(1 row)\n\n* Variable-size\nNOTICE: num_keys = 999654, height = 2, n1 = 0, n4 = 1, n15 = 26951,\nn32 = 35548, n61 = 1, n128 = 0, n256 = 245\n nkeys | rt_mem_allocated | array_mem_allocated | rt_load_ms |\narray_load_ms | rt_search_ms | array_serach_ms\n--------+------------------+---------------------+------------+---------------+--------------+-----------------\n 999654 | 16009040 | | 85 |\n | 201 |\n(1 row)\n\n---\n* Query\nselect * from bench_search_random_nodes(10 * 1000 * 1000, '0x7F07FF00FF')\n\n* Fixed-size\nNOTICE: num_keys = 9291812, height = 4, n4 = 
262144, n32 = 79603,\nn128 = 182670, n256 = 1024\n mem_allocated | search_ms\n---------------+-----------\n 343001456 | 1151\n(1 row)\n\n* Variable-size\nNOTICE: num_keys = 9291812, height = 4, n1 = 262144, n4 = 0, n15 =\n138, n32 = 79465, n61 = 182665, n128 = 5, n256 = 1024\n mem_allocated | search_ms\n---------------+-----------\n 230504328 | 1077\n(1 row)\n\n---\n* Query\nselect * from bench_search_random_nodes(10 * 1000 * 1000, '0xFFFF0000003F')\n* Fixed-size\nNOTICE: num_keys = 3807650, height = 5, n4 = 196608, n32 = 0, n128 =\n65536, n256 = 257\n mem_allocated | search_ms\n---------------+-----------\n 99911920 | 632\n(1 row)\n* Variable-size\nNOTICE: num_keys = 3807650, height = 5, n1 = 196608, n4 = 0, n15 = 0,\nn32 = 0, n61 = 61747, n128 = 3789, n256 = 257\n mem_allocated | search_ms\n---------------+-----------\n 64045688 | 554\n(1 row)\n\nOverall, the idea of variable-sized nodes is good, smaller size\nwithout losing search performance. I'm going to check the load\nperformance as well.\n\nI've attached the patches I used for the verification. I don't include\npatches for pointer tagging, DSA support, and vacuum integration since\nI'm investigating the issue on cfbot that Andres reported. Also, I've\nmodified tests to improve the test coverage.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 24 Nov 2022 23:54:06 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Nov 24, 2022 at 9:54 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> So it seems that there are two candidates of rt_node structure: (1)\n> all nodes except for node256 are variable-size nodes and use pointer\n> tagging, and (2) node32 and node128 are variable-sized nodes and do\n> not use pointer tagging (fanout is in part of only these two nodes).\n> rt_node can be 5 bytes in both cases. 
But before going to this step, I\n> started to verify the idea of variable-size nodes by using 6-bytes\n> rt_node. We can adjust the node kinds and node classes later.\n\nFirst, I'm glad you picked up the size class concept and expanded it. (I\nhave some comments about some internal APIs below.)\n\nLet's leave the pointer tagging piece out until the main functionality is\ncommitted. We have all the prerequisites in place, except for a benchmark\nrandom enough to demonstrate benefit. I'm still not quite satisfied with\nhow the shared memory coding looked, and that is the only sticky problem we\nstill have, IMO. The rest is \"just work\".\n\nThat said, (1) and (2) above are still relevant -- variable sizing any\ngiven node is optional, and we can refine as needed.\n\n> Overall, the idea of variable-sized nodes is good, smaller size\n> without losing search performance.\n\nGood.\n\n> I'm going to check the load\n> performance as well.\n\nPart of that is this, which gets called a lot more now, when node1 expands:\n\n+ if (inner)\n+ newnode = (rt_node *) MemoryContextAllocZero(tree->inner_slabs[kind],\n+ rt_node_kind_info[kind].inner_size);\n+ else\n+ newnode = (rt_node *) MemoryContextAllocZero(tree->leaf_slabs[kind],\n+ rt_node_kind_info[kind].leaf_size);\n\nSince memset for expanding size class is now handled separately, these can\nuse the non-zeroing versions. When compiling MemoryContextAllocZero, the\ncompiler has no idea how big the size is, so it assumes the worst and\noptimizes for large sizes. On x86-64, that means using \"rep stos\",\nwhich calls microcode found in the CPU's ROM. This is slow for small sizes.\nThe \"init\" function should be always inline with const parameters where\npossible. That way, memset can compile to a single instruction for the\nsmallest node kind. 
(More on alloc/init below)\n\nNote, there is a wrinkle: As currently written inner_node128 searches the\nchild pointers for NULL when inserting, so when expanding from partial to\nfull size class, the new node must be zeroed (Worth fixing in the short\nterm. I thought of this while writing the proof-of-concept for size\nclasses, but didn't mention it.) Medium term, rather than special-casing\nthis, I actually want to rewrite the inner-node128 to be more similar to\nthe leaf, with an \"isset\" array, but accessed and tested differently. I\nguarantee it's *really* slow now to load (maybe somewhat true even for\nleaves), but I'll leave the details for later. Regarding node128 leaf, note\nthat it's slightly larger than a DSA size class, and we can trim it to fit:\n\nnode61: 6 + 256+(2) +16 + 61*8 = 768\nnode125: 6 + 256+(2) +16 + 125*8 = 1280\n\n> I've attached the patches I used for the verification. I don't include\n> patches for pointer tagging, DSA support, and vacuum integration since\n> I'm investigating the issue on cfbot that Andres reported. Also, I've\n> modified tests to improve the test coverage.\n\nSounds good. For v12, I think size classes have proven themselves, so v11's\n0002/4/5 can be squashed. 
Plus, some additional comments:\n\n+/* Return a new and initialized node */\n+static rt_node *\n+rt_alloc_init_node(radix_tree *tree, uint8 kind, uint8 shift, uint8 chunk,\nbool inner)\n+{\n+ rt_node *newnode;\n+\n+ newnode = rt_alloc_node(tree, kind, inner);\n+ rt_init_node(newnode, kind, shift, chunk, inner);\n+\n+ return newnode;\n+}\n\nI don't see the point of a function that just calls two functions.\n\n+/*\n+ * Create a new node with 'new_kind' and the same shift, chunk, and\n+ * count of 'node'.\n+ */\n+static rt_node *\n+rt_grow_node(radix_tree *tree, rt_node *node, int new_kind)\n+{\n+ rt_node *newnode;\n+\n+ newnode = rt_alloc_init_node(tree, new_kind, node->shift, node->chunk,\n+ node->shift > 0);\n+ newnode->count = node->count;\n+\n+ return newnode;\n+}\n\nThis, in turn, just calls a function that does _almost_ everything, and\nadditionally must set one member. This function should really be alloc-node\n+ init-node + copy-common, where copy-common is like in the prototype:\n+ newnode->node_shift = oldnode->node_shift;\n+ newnode->node_chunk = oldnode->node_chunk;\n+ newnode->count = oldnode->count;\n\nAnd init-node should really be just memset + set kind + set initial fanout.\nIt has no business touching \"shift\" and \"chunk\". The callers rt_new_root,\nrt_set_extend, and rt_extend set some values of their own anyway, so let\nthem set those, too -- it might even improve readability.\n\n- if (n32->base.n.fanout ==\nrt_size_class_info[RT_CLASS_32_PARTIAL].fanout)\n+ if (NODE_NEEDS_TO_GROW_CLASS(n32, RT_CLASS_32_PARTIAL))\n\nThis macro doesn't really improve readability -- it obscures what is being\ntested, and the name implies the \"else\" branch means \"node doesn't need to\ngrow class\", which is false. 
If we want to simplify expressions in this\nblock, I think it'd be more effective to improve the lines that follow:\n\n+ memcpy(new32, n32, rt_size_class_info[RT_CLASS_32_PARTIAL].inner_size);\n+ new32->base.n.fanout = rt_size_class_info[RT_CLASS_32_FULL].fanout;\n\nMaybe we can have const variables old_size and new_fanout to break out the\narray lookup? While I'm thinking of it, these arrays should be const so the\ncompiler can avoid runtime lookups. Speaking of...\n\n+/* Copy both chunks and children/values arrays */\n+static inline void\n+chunk_children_array_copy(uint8 *src_chunks, rt_node **src_children,\n+ uint8 *dst_chunks, rt_node **dst_children, int count)\n+{\n+ /* For better code generation */\n+ if (count > rt_node_kind_info[RT_NODE_KIND_4].fanout)\n+ pg_unreachable();\n+\n+ memcpy(dst_chunks, src_chunks, sizeof(uint8) * count);\n+ memcpy(dst_children, src_children, sizeof(rt_node *) * count);\n+}\n\nWhen I looked at this earlier, I somehow didn't go far enough -- why are we\npassing the runtime count in the first place? This function can only be\ncalled if count == rt_size_class_info[RT_CLASS_4_FULL].fanout. The last\nparameter to memcpy should evaluate to a compile-time constant, right? Even\nwhen we add node shrinking in the future, the constant should be correct,\nIIUC?\n\n- .fanout = 256,\n+ /* technically it's 256, but we can't store that in a uint8,\n+ and this is the max size class so it will never grow */\n+ .fanout = 0,\n\n- Assert(chunk_exists || NODE_HAS_FREE_SLOT(n256));\n+ Assert(((rt_node *) n256)->fanout == 0);\n+ Assert(chunk_exists || ((rt_node *) n256)->count < 256);\n\nThese hacks were my work, but I think we can improve that by having two\nversions of NODE_HAS_FREE_SLOT -- one for fixed- and one for variable-sized\nnodes. For that to work, in \"init-node\" we'd need a branch to set fanout to\nzero for node256. 
That should be fine -- it already has to branch for\nmemset'ing node128's indexes to 0xFF.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Nov 24, 2022 at 9:54 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> So it seems that there are two candidates of rt_node structure: (1)> all nodes except for node256 are variable-size nodes and use pointer> tagging, and (2) node32 and node128 are variable-sized nodes and do> not use pointer tagging (fanout is in part of only these two nodes).> rt_node can be 5 bytes in both cases. But before going to this step, I> started to verify the idea of variable-size nodes by using 6-bytes> rt_node. We can adjust the node kinds and node classes later.First, I'm glad you picked up the size class concept and expanded it. (I have some comments about some internal APIs below.)Let's leave the pointer tagging piece out until the main functionality is committed. We have all the prerequisites in place, except for a benchmark random enough to demonstrate benefit. I'm still not quite satisfied with how the shared memory coding looked, and that is the only sticky problem we still have, IMO. The rest is \"just work\".That said, (1) and (2) above are still relevant -- variable sizing any given node is optional, and we can refine as needed.> Overall, the idea of variable-sized nodes is good, smaller size> without losing search performance. Good.> I'm going to check the load> performance as well.Part of that is this, which gets called a lot more now, when node1 expands:+\tif (inner)+\t\tnewnode = (rt_node *) MemoryContextAllocZero(tree->inner_slabs[kind],+\t\t\t\t\t\t\t\t\t\t\t\t\t rt_node_kind_info[kind].inner_size);+\telse+\t\tnewnode = (rt_node *) MemoryContextAllocZero(tree->leaf_slabs[kind],+\t\t\t\t\t\t\t\t\t\t\t\t\t rt_node_kind_info[kind].leaf_size);Since memset for expanding size class is now handled separately, these can use the non-zeroing versions. 
When compiling MemoryContextAllocZero, the compiler has no idea how big the size is, so it assumes the worst and optimizes for large sizes. On x86-64, that means using \"rep stos\", which calls microcode found in the CPU's ROM. This is slow for small sizes. The \"init\" function should be always inline with const parameters where possible. That way, memset can compile to a single instruction for the smallest node kind. (More on alloc/init below)Note, there is a wrinkle: As currently written inner_node128 searches the child pointers for NULL when inserting, so when expanding from partial to full size class, the new node must be zeroed (Worth fixing in the short term. I thought of this while writing the proof-of-concept for size classes, but didn't mention it.) Medium term, rather than special-casing this, I actually want to rewrite the inner-node128 to be more similar to the leaf, with an \"isset\" array, but accessed and tested differently. I guarantee it's *really* slow now to load (maybe somewhat true even for leaves), but I'll leave the details for later. Regarding node128 leaf, note that it's slightly larger than a DSA size class, and we can trim it to fit:node61:  6 + 256+(2) +16 +  61*8 =  768node125: 6 + 256+(2) +16 + 125*8 = 1280> I've attached the patches I used for the verification. I don't include> patches for pointer tagging, DSA support, and vacuum integration since> I'm investigating the issue on cfbot that Andres reported. Also, I've> modified tests to improve the test coverage.Sounds good. For v12, I think size classes have proven themselves, so v11's 0002/4/5 can be squashed. 
Plus, some additional comments:+/* Return a new and initialized node */+static rt_node *+rt_alloc_init_node(radix_tree *tree, uint8 kind, uint8 shift, uint8 chunk, bool inner)+{+\trt_node *newnode;++\tnewnode = rt_alloc_node(tree, kind, inner);+\trt_init_node(newnode, kind, shift, chunk, inner);++\treturn newnode;+}I don't see the point of a function that just calls two functions.+/*+ * Create a new node with 'new_kind' and the same shift, chunk, and+ * count of 'node'.+ */+static rt_node *+rt_grow_node(radix_tree *tree, rt_node *node, int new_kind)+{+\trt_node    *newnode;++\tnewnode = rt_alloc_init_node(tree, new_kind, node->shift, node->chunk,+\t\t\t\t\t\t\t\t node->shift > 0);+\tnewnode->count = node->count;++\treturn newnode;+}This, in turn, just calls a function that does _almost_ everything, and additionally must set one member. This function should really be alloc-node + init-node + copy-common, where copy-common is like in the prototype:+ newnode->node_shift = oldnode->node_shift;+ newnode->node_chunk = oldnode->node_chunk;+ newnode->count = oldnode->count;And init-node should really be just memset + set kind + set initial fanout. It has no business touching \"shift\" and \"chunk\". The callers rt_new_root, rt_set_extend, and rt_extend set some values of their own anyway, so let them set those, too -- it might even improve readability.-       if (n32->base.n.fanout == rt_size_class_info[RT_CLASS_32_PARTIAL].fanout)+       if (NODE_NEEDS_TO_GROW_CLASS(n32, RT_CLASS_32_PARTIAL))This macro doesn't really improve readability -- it obscures what is being tested, and the name implies the \"else\" branch means \"node doesn't need to grow class\", which is false. 
If we want to simplify expressions in this block, I think it'd be more effective to improve the lines that follow:+\tmemcpy(new32, n32, rt_size_class_info[RT_CLASS_32_PARTIAL].inner_size);+\tnew32->base.n.fanout = rt_size_class_info[RT_CLASS_32_FULL].fanout;Maybe we can have const variables old_size and new_fanout to break out the array lookup? While I'm thinking of it, these arrays should be const so the compiler can avoid runtime lookups. Speaking of...+/* Copy both chunks and children/values arrays */+static inline void+chunk_children_array_copy(uint8 *src_chunks, rt_node **src_children,+\t\t\t\t\t\t  uint8 *dst_chunks, rt_node **dst_children, int count)+{+\t/* For better code generation */+\tif (count > rt_node_kind_info[RT_NODE_KIND_4].fanout)+\t\tpg_unreachable();++\tmemcpy(dst_chunks, src_chunks, sizeof(uint8) * count);+\tmemcpy(dst_children, src_children, sizeof(rt_node *) * count);+}When I looked at this earlier, I somehow didn't go far enough -- why are we passing the runtime count in the first place? This function can only be called if count == rt_size_class_info[RT_CLASS_4_FULL].fanout. The last parameter to memcpy should evaluate to a compile-time constant, right? Even when we add node shrinking in the future, the constant should be correct, IIUC?-\t\t.fanout = 256,+\t\t/* technically it's 256, but we can't store that in a uint8,+\t\t  and this is the max size class so it will never grow */+\t\t.fanout = 0,-\t\t\t\tAssert(chunk_exists || NODE_HAS_FREE_SLOT(n256));+\t\t\t\tAssert(((rt_node *) n256)->fanout == 0);+\t\t\t\tAssert(chunk_exists || ((rt_node *) n256)->count < 256);These hacks were my work, but I think we can improve that by having two versions of NODE_HAS_FREE_SLOT -- one for fixed- and one for variable-sized nodes. For that to work, in \"init-node\" we'd need a branch to set fanout to zero for node256. 
That should be fine -- it already has to branch for memset'ing node128's indexes to 0xFF.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 25 Nov 2022 15:00:16 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Nov 24, 2022 at 9:54 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> [v11]\n\nThere is one more thing that just now occurred to me: In expanding the use\nof size classes, that makes rebasing and reworking the shared memory piece\nmore work than it should be. That's important because there are still some\nopen questions about the design around shared memory. To keep unnecessary\nchurn to a minimum, perhaps we should limit size class expansion to just\none (or 5 total size classes) for the near future?\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Nov 24, 2022 at 9:54 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> [v11]There is one more thing that just now occurred to me: In expanding the use of size classes, that makes rebasing and reworking the shared memory piece more work than it should be. That's important because there are still some open questions about the design around shared memory. To keep unnecessary churn to a minimum, perhaps we should limit size class expansion to just one (or 5 total size classes) for the near future?--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 25 Nov 2022 16:47:20 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "While creating a benchmark for inserting into node128-inner, I found a bug.\nIf a caller deletes from a node128, the slot index is set to invalid, but\nthe child pointer is still valid. Do that a few times, and every child\npointer is valid, even if no slot index points to it. 
When the next\ninserter comes along, something surprising happens. This function:\n\n/* Return an unused slot in node-128 */\nstatic int\nnode_inner_128_find_unused_slot(rt_node_inner_128 *node, uint8 chunk)\n{\n int slotpos = 0;\n\n Assert(!NODE_IS_LEAF(node));\n while (node_inner_128_is_slot_used(node, slotpos))\n slotpos++;\n\n return slotpos;\n}\n\n...passes an integer to this function, whose parameter is a uint8:\n\n/* Is the slot in the node used? */\nstatic inline bool\nnode_inner_128_is_slot_used(rt_node_inner_128 *node, uint8 slot)\n{\n Assert(!NODE_IS_LEAF(node));\n return (node->children[slot] != NULL);\n}\n\n...so instead of growing the node unnecessarily or segfaulting, it enters\nan infinite loop doing this:\n\nadd eax, 1\nmovzx ecx, al\ncmp QWORD PTR [rbx+264+rcx*8], 0\njne .L147\n\nThe fix is easy enough -- set the child pointer to null upon deletion, but\nI'm somewhat astonished that the regression tests didn't hit this. I do\nstill intend to replace this code with something faster, but before I do so\nthe tests should probably exercise the deletion paths more. Since VACUUM\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nWhile creating a benchmark for inserting into node128-inner, I found a bug. If a caller deletes from a node128, the slot index is set to invalid, but the child pointer is still valid. Do that a few times, and every child pointer is valid, even if no slot index points to it. When the next inserter comes along, something surprising happens. This function:/* Return an unused slot in node-128 */static intnode_inner_128_find_unused_slot(rt_node_inner_128 *node, uint8 chunk){  int\t\t\tslotpos = 0;  Assert(!NODE_IS_LEAF(node));  while (node_inner_128_is_slot_used(node, slotpos))  slotpos++;  return slotpos;}...passes an integer to this function, whose parameter is a uint8:/* Is the slot in the node used? 
*/static inline boolnode_inner_128_is_slot_used(rt_node_inner_128 *node, uint8 slot){  Assert(!NODE_IS_LEAF(node));  return (node->children[slot] != NULL);}...so instead of growing the node unnecessarily or segfaulting, it enters an infinite loop doing this:add     eax, 1movzx   ecx, alcmp     QWORD PTR [rbx+264+rcx*8], 0jne     .L147The fix is easy enough -- set the child pointer to null upon deletion, but I'm somewhat astonished that the regression tests didn't hit this. I do still intend to replace this code with something faster, but before I do so the tests should probably exercise the deletion paths more. Since VACUUM--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 29 Nov 2022 11:35:55 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "> The fix is easy enough -- set the child pointer to null upon deletion,\nbut I'm somewhat astonished that the regression tests didn't hit this. I do\nstill intend to replace this code with something faster, but before I do so\nthe tests should probably exercise the deletion paths more. Since VACUUM\n\nOops. I meant to finish with \"Since VACUUM doesn't perform deletion we\ndidn't have an opportunity to detect this during that operation.\"\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\n> The fix is easy enough -- set the child pointer to null upon deletion, but I'm somewhat astonished that the regression tests didn't hit this. I do still intend to replace this code with something faster, but before I do so the tests should probably exercise the deletion paths more. Since VACUUMOops. 
I meant to finish with \"Since VACUUM doesn't perform deletion we didn't have an opportunity to detect this during that operation.\"--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 29 Nov 2022 11:37:38 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "There are a few things up in the air, so I'm coming back to this list to\nsummarize and add a recent update:\n\nOn Mon, Nov 14, 2022 at 7:59 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n>\n> - See how much performance we actually gain from tagging the node kind.\n\nNeeds a benchmark that has enough branch mispredicts and L2/3 misses to\nshow a benefit. Otherwise either neutral or worse in its current form,\ndepending on compiler(?). Put off for later.\n\n> - Try additional size classes while keeping the node kinds to only four.\n\nThis is relatively simple and effective. If only one additional size class\n(total 5) is coded as a placeholder, I imagine it will be easier to rebase\nshared memory logic than using this technique everywhere possible.\n\n> - Optimize node128 insert.\n\nI've attached a rough start at this. The basic idea is borrowed from our\nbitmapset nodes, so we can iterate over and operate on word-sized (32- or\n64-bit) types at a time, rather than bytes. To make this easier, I've moved\nsome of the lower-level macros and types from bitmapset.h/.c to\npg_bitutils.h. That's probably going to need a separate email thread to\nresolve the coding style clash this causes, so that can be put off for\nlater. This is not meant to be included in the next patchset. 
For\ndemonstration purposes, I get these results with a function that repeatedly\ndeletes the last value from a mostly-full node128 leaf and re-inserts it:\n\nselect * from bench_node128_load(120);\n\nv11\n\nNOTICE: num_keys = 14400, height = 1, n1 = 0, n4 = 0, n15 = 0, n32 = 0,\nn61 = 0, n128 = 121, n256 = 0\n fanout | nkeys | rt_mem_allocated | rt_sparseload_ms\n--------+-------+------------------+------------------\n 120 | 14400 | 208304 | 56\n\nv11 + 0006 addendum\n\nNOTICE: num_keys = 14400, height = 1, n1 = 0, n4 = 0, n15 = 0, n32 = 0,\nn61 = 0, n128 = 121, n256 = 0\n fanout | nkeys | rt_mem_allocated | rt_sparseload_ms\n--------+-------+------------------+------------------\n 120 | 14400 | 208816 | 34\n\nI didn't test inner nodes, but I imagine the difference is bigger. This\nbitmap style should also be used for the node256-leaf isset array simply to\nbe consistent and avoid needing single-use macros, but that has not been\ndone yet. It won't make a difference for performance because there is no\niteration there.\n\n> - Try templating out the differences between local and shared memory.\n\nI hope to start this sometime after the crashes on 32-bit are resolved.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 30 Nov 2022 12:51:03 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Nov 25, 2022 at 5:00 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Nov 24, 2022 at 9:54 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > So it seems that there are two candidates of rt_node structure: (1)\n> > all nodes except for node256 are variable-size nodes and use pointer\n> > tagging, and (2) node32 and node128 are variable-sized nodes and do\n> > not use pointer tagging (fanout is in part of only these two nodes).\n> > rt_node can be 5 bytes in both cases. 
But before going to this step, I\n> > started to verify the idea of variable-size nodes by using 6-bytes\n> > rt_node. We can adjust the node kinds and node classes later.\n>\n> First, I'm glad you picked up the size class concept and expanded it. (I have some comments about some internal APIs below.)\n>\n> Let's leave the pointer tagging piece out until the main functionality is committed. We have all the prerequisites in place, except for a benchmark random enough to demonstrate benefit. I'm still not quite satisfied with how the shared memory coding looked, and that is the only sticky problem we still have, IMO. The rest is \"just work\".\n>\n> That said, (1) and (2) above are still relevant -- variable sizing any given node is optional, and we can refine as needed.\n>\n> > Overall, the idea of variable-sized nodes is good, smaller size\n> > without losing search performance.\n>\n> Good.\n>\n> > I'm going to check the load\n> > performance as well.\n>\n> Part of that is this, which gets called a lot more now, when node1 expands:\n>\n> + if (inner)\n> + newnode = (rt_node *) MemoryContextAllocZero(tree->inner_slabs[kind],\n> + rt_node_kind_info[kind].inner_size);\n> + else\n> + newnode = (rt_node *) MemoryContextAllocZero(tree->leaf_slabs[kind],\n> + rt_node_kind_info[kind].leaf_size);\n>\n> Since memset for expanding size class is now handled separately, these can use the non-zeroing versions. When compiling MemoryContextAllocZero, the compiler has no idea how big the size is, so it assumes the worst and optimizes for large sizes. On x86-64, that means using \"rep stos\", which calls microcode found in the CPU's ROM. This is slow for small sizes. The \"init\" function should be always inline with const parameters where possible. That way, memset can compile to a single instruction for the smallest node kind. (More on alloc/init below)\n\nRight. 
I forgot to update it.\n\n>\n> Note, there is a wrinkle: As currently written inner_node128 searches the child pointers for NULL when inserting, so when expanding from partial to full size class, the new node must be zeroed (Worth fixing in the short term. I thought of this while writing the proof-of-concept for size classes, but didn't mention it.) Medium term, rather than special-casing this, I actually want to rewrite the inner-node128 to be more similar to the leaf, with an \"isset\" array, but accessed and tested differently. I guarantee it's *really* slow now to load (maybe somewhat true even for leaves), but I'll leave the details for later.\n\nAgreed, I start with zeroing out the node when expanding from partial\nto full size.\n\n> Regarding node128 leaf, note that it's slightly larger than a DSA size class, and we can trim it to fit:\n>\n> node61: 6 + 256+(2) +16 + 61*8 = 768\n> node125: 6 + 256+(2) +16 + 125*8 = 1280\n\nAgreed, changed.\n\n>\n> > I've attached the patches I used for the verification. I don't include\n> > patches for pointer tagging, DSA support, and vacuum integration since\n> > I'm investigating the issue on cfbot that Andres reported. Also, I've\n> > modified tests to improve the test coverage.\n>\n> Sounds good. For v12, I think size classes have proven themselves, so v11's 0002/4/5 can be squashed. 
Plus, some additional comments:\n>\n> +/* Return a new and initialized node */\n> +static rt_node *\n> +rt_alloc_init_node(radix_tree *tree, uint8 kind, uint8 shift, uint8 chunk, bool inner)\n> +{\n> + rt_node *newnode;\n> +\n> + newnode = rt_alloc_node(tree, kind, inner);\n> + rt_init_node(newnode, kind, shift, chunk, inner);\n> +\n> + return newnode;\n> +}\n>\n> I don't see the point of a function that just calls two functions.\n\nRemoved.\n\n>\n> +/*\n> + * Create a new node with 'new_kind' and the same shift, chunk, and\n> + * count of 'node'.\n> + */\n> +static rt_node *\n> +rt_grow_node(radix_tree *tree, rt_node *node, int new_kind)\n> +{\n> + rt_node *newnode;\n> +\n> + newnode = rt_alloc_init_node(tree, new_kind, node->shift, node->chunk,\n> + node->shift > 0);\n> + newnode->count = node->count;\n> +\n> + return newnode;\n> +}\n>\n> This, in turn, just calls a function that does _almost_ everything, and additionally must set one member. This function should really be alloc-node + init-node + copy-common, where copy-common is like in the prototype:\n> + newnode->node_shift = oldnode->node_shift;\n> + newnode->node_chunk = oldnode->node_chunk;\n> + newnode->count = oldnode->count;\n>\n> And init-node should really be just memset + set kind + set initial fanout. It has no business touching \"shift\" and \"chunk\". The callers rt_new_root, rt_set_extend, and rt_extend set some values of their own anyway, so let them set those, too -- it might even improve readability.\n>\n> - if (n32->base.n.fanout == rt_size_class_info[RT_CLASS_32_PARTIAL].fanout)\n> + if (NODE_NEEDS_TO_GROW_CLASS(n32, RT_CLASS_32_PARTIAL))\n\nAgreed.\n\n>\n> This macro doesn't really improve readability -- it obscures what is being tested, and the name implies the \"else\" branch means \"node doesn't need to grow class\", which is false. 
If we want to simplify expressions in this block, I think it'd be more effective to improve the lines that follow:\n>\n> + memcpy(new32, n32, rt_size_class_info[RT_CLASS_32_PARTIAL].inner_size);\n> + new32->base.n.fanout = rt_size_class_info[RT_CLASS_32_FULL].fanout;\n>\n> Maybe we can have const variables old_size and new_fanout to break out the array lookup? While I'm thinking of it, these arrays should be const so the compiler can avoid runtime lookups. Speaking of...\n>\n> +/* Copy both chunks and children/values arrays */\n> +static inline void\n> +chunk_children_array_copy(uint8 *src_chunks, rt_node **src_children,\n> + uint8 *dst_chunks, rt_node **dst_children, int count)\n> +{\n> + /* For better code generation */\n> + if (count > rt_node_kind_info[RT_NODE_KIND_4].fanout)\n> + pg_unreachable();\n> +\n> + memcpy(dst_chunks, src_chunks, sizeof(uint8) * count);\n> + memcpy(dst_children, src_children, sizeof(rt_node *) * count);\n> +}\n>\n> When I looked at this earlier, I somehow didn't go far enough -- why are we passing the runtime count in the first place? This function can only be called if count == rt_size_class_info[RT_CLASS_4_FULL].fanout. The last parameter to memcpy should evaluate to a compile-time constant, right? Even when we add node shrinking in the future, the constant should be correct, IIUC?\n\nRight. We don't need to pass count to these functions.\n\n>\n> - .fanout = 256,\n> + /* technically it's 256, but we can't store that in a uint8,\n> + and this is the max size class so it will never grow */\n> + .fanout = 0,\n>\n> - Assert(chunk_exists || NODE_HAS_FREE_SLOT(n256));\n> + Assert(((rt_node *) n256)->fanout == 0);\n> + Assert(chunk_exists || ((rt_node *) n256)->count < 256);\n>\n> These hacks were my work, but I think we can improve that by having two versions of NODE_HAS_FREE_SLOT -- one for fixed- and one for variable-sized nodes. For that to work, in \"init-node\" we'd need a branch to set fanout to zero for node256. 
That should be fine -- it already has to branch for memset'ing node128's indexes to 0xFF.\n\nSince the node has fanout regardless of fixed-sized and\nvariable-sized, only node256 is the special case where the fanout in\nthe node doesn't match the actual fanout of the node. I think if we\nwant to have two versions of NODE_HAS_FREE_SLOT, we can have one for\nnode256 and one for other classes. Thoughts? In your idea, for\nNODE_HAS_FREE_SLOT for fixed-sized nodes, you meant like the\nfollowing?\n\n#define FIXED_NODDE_HAS_FREE_SLOT(node, class)\n (node->base.n.count < rt_size_class_info[class].fanout)\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 30 Nov 2022 16:27:59 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Nov 25, 2022 at 6:47 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n>\n> On Thu, Nov 24, 2022 at 9:54 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > [v11]\n>\n> There is one more thing that just now occurred to me: In expanding the use of size classes, that makes rebasing and reworking the shared memory piece more work than it should be. That's important because there are still some open questions about the design around shared memory. To keep unnecessary churn to a minimum, perhaps we should limit size class expansion to just one (or 5 total size classes) for the near future?\n\nMake sense. 
We can add size classes once we have a good design and\nimplementation around shared memory.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 30 Nov 2022 16:30:15 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Nov 29, 2022 at 1:36 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> While creating a benchmark for inserting into node128-inner, I found a bug. If a caller deletes from a node128, the slot index is set to invalid, but the child pointer is still valid. Do that a few times, and every child pointer is valid, even if no slot index points to it. When the next inserter comes along, something surprising happens. This function:\n>\n> /* Return an unused slot in node-128 */\n> static int\n> node_inner_128_find_unused_slot(rt_node_inner_128 *node, uint8 chunk)\n> {\n> int slotpos = 0;\n>\n> Assert(!NODE_IS_LEAF(node));\n> while (node_inner_128_is_slot_used(node, slotpos))\n> slotpos++;\n>\n> return slotpos;\n> }\n>\n> ...passes an integer to this function, whose parameter is a uint8:\n>\n> /* Is the slot in the node used? */\n> static inline bool\n> node_inner_128_is_slot_used(rt_node_inner_128 *node, uint8 slot)\n> {\n> Assert(!NODE_IS_LEAF(node));\n> return (node->children[slot] != NULL);\n> }\n>\n> ...so instead of growing the node unnecessarily or segfaulting, it enters an infinite loop doing this:\n>\n> add eax, 1\n> movzx ecx, al\n> cmp QWORD PTR [rbx+264+rcx*8], 0\n> jne .L147\n>\n> The fix is easy enough -- set the child pointer to null upon deletion,\n\nGood catch!\n\n> but I'm somewhat astonished that the regression tests didn't hit this. I do still intend to replace this code with something faster, but before I do so the tests should probably exercise the deletion paths more. 
Since VACUUM\n\nIndeed, there are some tests for deletion but all of them delete all\nkeys in the node so we end up deleting the node. I've added tests of\nrepeating deletion and insertion as well as additional assertions.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 30 Nov 2022 17:53:20 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Nov 23, 2022 at 2:10 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> On 2022-11-21 17:06:56 +0900, Masahiko Sawada wrote:\n> > Sure. I've attached the v10 patches. 0004 is the pure refactoring\n> > patch and 0005 patch introduces the pointer tagging.\n>\n> This failed on cfbot, with som many crashes that the VM ran out of disk for\n> core dumps. During testing with 32bit, so there's probably something broken\n> around that.\n>\n> https://cirrus-ci.com/task/4635135954386944\n>\n> A failure is e.g. at: https://api.cirrus-ci.com/v1/artifact/task/4635135954386944/testrun/build-32/testrun/adminpack/regress/log/initdb.log\n>\n> performing post-bootstrap initialization ... ../src/backend/lib/radixtree.c:1696:21: runtime error: member access within misaligned address 0x590faf74 for type 'struct radix_tree_control', which requires 8 byte alignment\n> 0x590faf74: note: pointer points here\n> 90 11 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00\n> ^\n\nradix_tree_control struct has two pg_atomic_uint64 variables, and the\nassertion check in pg_atomic_init_u64() failed:\n\nstatic inline void\npg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)\n{\n /*\n * Can't necessarily enforce alignment - and don't need it - when using\n * the spinlock based fallback implementation. 
Therefore only assert when\n * not using it.\n */\n#ifndef PG_HAVE_ATOMIC_U64_SIMULATION\n AssertPointerAlignment(ptr, 8);\n#endif\n pg_atomic_init_u64_impl(ptr, val);\n}\n\nI've investigated this issue and have a question about using atomic\nvariables on palloc'ed memory. In non-parallel vacuum cases,\nradix_tree_control is allocated via aset.c. IIUC in 32-bit machines,\nthe memory allocated by aset.c is 4-bytes aligned so these atomic\nvariables are not always 8-bytes aligned. Is there any way to enforce\n8-bytes aligned memory allocations in 32-bit machines?\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 1 Dec 2022 01:08:27 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Nov 30, 2022 at 11:09 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> I've investigated this issue and have a question about using atomic\n> variables on palloc'ed memory. In non-parallel vacuum cases,\n> radix_tree_control is allocated via aset.c. IIUC in 32-bit machines,\n> the memory allocated by aset.c is 4-bytes aligned so these atomic\n> variables are not always 8-bytes aligned. Is there any way to enforce\n> 8-bytes aligned memory allocations in 32-bit machines?\n\nThe bigger question in my mind is: Why is there an atomic variable in\nbackend-local memory?\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Nov 30, 2022 at 11:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> I've investigated this issue and have a question about using atomic> variables on palloc'ed memory. In non-parallel vacuum cases,> radix_tree_control is allocated via aset.c. IIUC in 32-bit machines,> the memory allocated by aset.c is 4-bytes aligned so these atomic> variables are not always 8-bytes aligned. 
Is there any way to enforce> 8-bytes aligned memory allocations in 32-bit machines?The bigger question in my mind is: Why is there an atomic variable in backend-local memory?--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 1 Dec 2022 14:00:43 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Nov 30, 2022 at 2:28 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Fri, Nov 25, 2022 at 5:00 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> > These hacks were my work, but I think we can improve that by having two\nversions of NODE_HAS_FREE_SLOT -- one for fixed- and one for variable-sized\nnodes. For that to work, in \"init-node\" we'd need a branch to set fanout to\nzero for node256. That should be fine -- it already has to branch for\nmemset'ing node128's indexes to 0xFF.\n>\n> Since the node has fanout regardless of fixed-sized and\n> variable-sized\n\nAs currently coded, yes. But that's not strictly necessary, I think.\n\n>, only node256 is the special case where the fanout in\n> the node doesn't match the actual fanout of the node. I think if we\n> want to have two versions of NODE_HAS_FREE_SLOT, we can have one for\n> node256 and one for other classes. Thoughts? In your idea, for\n> NODE_HAS_FREE_SLOT for fixed-sized nodes, you meant like the\n> following?\n>\n> #define FIXED_NODDE_HAS_FREE_SLOT(node, class)\n> (node->base.n.count < rt_size_class_info[class].fanout)\n\nRight, and the other one could be VAR_NODE_...\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Nov 30, 2022 at 2:28 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Fri, Nov 25, 2022 at 5:00 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> > These hacks were my work, but I think we can improve that by having two versions of NODE_HAS_FREE_SLOT -- one for fixed- and one for variable-sized nodes. 
For that to work, in \"init-node\" we'd need a branch to set fanout to zero for node256. That should be fine -- it already has to branch for memset'ing node128's indexes to 0xFF.>> Since the node has fanout regardless of fixed-sized and> variable-sizedAs currently coded, yes. But that's not strictly necessary, I think.>, only node256 is the special case where the fanout in> the node doesn't match the actual fanout of the node. I think if we> want to have two versions of NODE_HAS_FREE_SLOT, we can have one for> node256 and one for other classes. Thoughts? In your idea, for> NODE_HAS_FREE_SLOT for fixed-sized nodes, you meant like the> following?>> #define FIXED_NODDE_HAS_FREE_SLOT(node, class)>   (node->base.n.count < rt_size_class_info[class].fanout)Right, and the other one could be VAR_NODE_...--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 1 Dec 2022 14:06:26 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 1, 2022 at 4:00 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Wed, Nov 30, 2022 at 11:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I've investigated this issue and have a question about using atomic\n> > variables on palloc'ed memory. In non-parallel vacuum cases,\n> > radix_tree_control is allocated via aset.c. IIUC in 32-bit machines,\n> > the memory allocated by aset.c is 4-bytes aligned so these atomic\n> > variables are not always 8-bytes aligned. Is there any way to enforce\n> > 8-bytes aligned memory allocations in 32-bit machines?\n>\n> The bigger question in my mind is: Why is there an atomic variable in backend-local memory?\n\nBecause I use the same radix_tree and radix_tree_control structs for\nnon-parallel and parallel vacuum. 
Therefore, radix_tree_control is\nallocated in DSM for parallel-vacuum cases or in backend-local memory\nfor non-parallel vacuum cases.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 1 Dec 2022 17:02:57 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 1, 2022 at 3:03 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Thu, Dec 1, 2022 at 4:00 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> >\n> > The bigger question in my mind is: Why is there an atomic variable in\nbackend-local memory?\n>\n> Because I use the same radix_tree and radix_tree_control structs for\n> non-parallel and parallel vacuum. Therefore, radix_tree_control is\n> allocated in DSM for parallel-vacuum cases or in backend-local memory\n> for non-parallel vacuum cases.\n\nOk, that could be yet another reason to compile local- and shared-memory\nfunctionality separately, but now I'm wondering why there are atomic\nvariables at all, since there isn't yet any locking support.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Dec 1, 2022 at 3:03 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Thu, Dec 1, 2022 at 4:00 PM John Naylor <john.naylor@enterprisedb.com> wrote:> >> > The bigger question in my mind is: Why is there an atomic variable in backend-local memory?>> Because I use the same radix_tree and radix_tree_control structs for> non-parallel and parallel vacuum. 
Therefore, radix_tree_control is> allocated in DSM for parallel-vacuum cases or in backend-local memory> for non-parallel vacuum cases.Ok, that could be yet another reason to compile local- and shared-memory functionality separately, but now I'm wondering why there are atomic variables at all, since there isn't yet any locking support.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 1 Dec 2022 15:49:09 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Nov 30, 2022 at 2:51 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> There are a few things up in the air, so I'm coming back to this list to summarize and add a recent update:\n>\n> On Mon, Nov 14, 2022 at 7:59 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> >\n> > - See how much performance we actually gain from tagging the node kind.\n>\n> Needs a benchmark that has enough branch mispredicts and L2/3 misses to show a benefit. Otherwise either neutral or worse in its current form, depending on compiler(?). Put off for later.\n>\n> > - Try additional size classes while keeping the node kinds to only four.\n>\n> This is relatively simple and effective. If only one additional size class (total 5) is coded as a placeholder, I imagine it will be easier to rebase shared memory logic than using this technique everywhere possible.\n>\n> > - Optimize node128 insert.\n>\n> I've attached a rough start at this. The basic idea is borrowed from our bitmapset nodes, so we can iterate over and operate on word-sized (32- or 64-bit) types at a time, rather than bytes.\n\nThanks! I think this is a good idea.\n\n> To make this easier, I've moved some of the lower-level macros and types from bitmapset.h/.c to pg_bitutils.h. 
That's probably going to need a separate email thread to resolve the coding style clash this causes, so that can be put off for later.\n\nAgreed. Since tidbitmap.c also has WORDNUM(x) and BITNUM(x), we can\nuse it if we move from bitmapset.h.\n\n> This is not meant to be included in the next patchset. For demonstration purposes, I get these results with a function that repeatedly deletes the last value from a mostly-full node128 leaf and re-inserts it:\n>\n> select * from bench_node128_load(120);\n>\n> v11\n>\n> NOTICE: num_keys = 14400, height = 1, n1 = 0, n4 = 0, n15 = 0, n32 = 0, n61 = 0, n128 = 121, n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_sparseload_ms\n> --------+-------+------------------+------------------\n> 120 | 14400 | 208304 | 56\n>\n> v11 + 0006 addendum\n>\n> NOTICE: num_keys = 14400, height = 1, n1 = 0, n4 = 0, n15 = 0, n32 = 0, n61 = 0, n128 = 121, n256 = 0\n> fanout | nkeys | rt_mem_allocated | rt_sparseload_ms\n> --------+-------+------------------+------------------\n> 120 | 14400 | 208816 | 34\n>\n> I didn't test inner nodes, but I imagine the difference is bigger. This bitmap style should also be used for the node256-leaf isset array simply to be consistent and avoid needing single-use macros, but that has not been done yet. 
It won't make a difference for performance because there is no iteration there.\n\n\nAfter updating the patch set according to recent comments, I've also\ndone the same test in my environment and got similar good results.\n\nw/o 0006 addendum patch\n\nNOTICE: num_keys = 14400, height = 1, n4 = 0, n15 = 0, n32 = 0, n125\n= 121, n256 = 0\n fanout | nkeys | rt_mem_allocated | rt_sparseload_ms\n--------+-------+------------------+------------------\n 120 | 14400 | 204424 | 29\n(1 row)\n\nw/ 0006 addendum patch\n\nNOTICE: num_keys = 14400, height = 1, n4 = 0, n15 = 0, n32 = 0, n125\n= 121, n256 = 0\n fanout | nkeys | rt_mem_allocated | rt_sparseload_ms\n--------+-------+------------------+------------------\n 120 | 14400 | 204936 | 18\n(1 row)\n\n> > - Try templating out the differences between local and shared memory.\n>\n> I hope to start this sometime after the crashes on 32-bit are resolved.\n\nI've attached updated patches that incorporated all comments I got so\nfar as well as fixes for compiler warnings. I included your bitmapword\npatch as 0004 for benchmarking. Also I reverted the change around\npg_atomic_u64 since we don't support any locking as you mentioned and\nif we have a single lwlock to protect the radix tree, we don't need to\nuse pg_atomic_u64 only for max_val and num_keys.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Sat, 3 Dec 2022 01:41:31 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 2, 2022 at 11:42 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> > On Mon, Nov 14, 2022 at 7:59 PM John Naylor <\njohn.naylor@enterprisedb.com> wrote:\n> > >\n> > > - Optimize node128 insert.\n> >\n> > I've attached a rough start at this. 
The basic idea is borrowed from\nour bitmapset nodes, so we can iterate over and operate on word-sized (32-\nor 64-bit) types at a time, rather than bytes.\n>\n> Thanks! I think this is a good idea.\n>\n> > To make this easier, I've moved some of the lower-level macros and\ntypes from bitmapset.h/.c to pg_bitutils.h. That's probably going to need a\nseparate email thread to resolve the coding style clash this causes, so\nthat can be put off for later.\n\nI started a separate thread [1], and 0002 comes from feedback on that.\nThere is a FIXME about using WORDNUM and BITNUM, at least with that\nspelling. I'm putting that off to ease rebasing the rest as v13 -- getting\nsome CI testing with 0002 seems like a good idea. There are no other\nchanges yet. Next, I will take a look at templating local vs. shared\nmemory. I might try basing that on the styles of both v12 and v8, and see\nwhich one works best with templating.\n\n[1]\nhttps://www.postgresql.org/message-id/CAFBsxsFW2JjTo58jtDB%2B3sZhxMx3t-3evew8%3DAcr%2BGGhC%2BkFaA%40mail.gmail.com\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Tue, 6 Dec 2022 17:32:08 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Dec 6, 2022 at 7:32 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Fri, Dec 2, 2022 at 11:42 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > On Mon, Nov 14, 2022 at 7:59 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> > > >\n> > > > - Optimize node128 insert.\n> > >\n> > > I've attached a rough start at this. The basic idea is borrowed from our bitmapset nodes, so we can iterate over and operate on word-sized (32- or 64-bit) types at a time, rather than bytes.\n> >\n> > Thanks! 
I think this is a good idea.\n> >\n> > > To make this easier, I've moved some of the lower-level macros and types from bitmapset.h/.c to pg_bitutils.h. That's probably going to need a separate email thread to resolve the coding style clash this causes, so that can be put off for later.\n>\n> I started a separate thread [1], and 0002 comes from feedback on that. There is a FIXME about using WORDNUM and BITNUM, at least with that spelling. I'm putting that off to ease rebasing the rest as v13 -- getting some CI testing with 0002 seems like a good idea. There are no other changes yet. Next, I will take a look at templating local vs. shared memory. I might try basing that on the styles of both v12 and v8, and see which one works best with templating.\n\nThank you so much!\n\nIn the meanwhile, I've been working on vacuum integration. There are\ntwo things I'd like to discuss some time:\n\nThe first is the minimum of maintenance_work_mem, 1 MB. Since the\ninitial DSA segment size is 1MB (DSA_INITIAL_SEGMENT_SIZE), parallel\nvacuum with radix tree cannot work with the minimum\nmaintenance_work_mem. It will need to increase it to 4MB or so. Maybe\nwe can start a new thread for that.\n\nThe second is how to limit the size of the radix tree to\nmaintenance_work_mem. I think that it's tricky to estimate the maximum\nnumber of keys in the radix tree that fit in maintenance_work_mem. The\nradix tree size varies depending on the key distribution. The next\nidea I considered was how to limit the size when inserting a key. In\norder to strictly limit the radix tree size, probably we have to\nchange the rt_set so that it breaks off and returns false if the radix\ntree size is about to exceed the memory limit when we allocate a new\nnode or grow a node kind/class. Ideally, I'd like to control the size\noutside of radix tree (e.g. 
TIDStore) since it could introduce\noverhead to rt_set() but probably we need to add such logic in radix\ntree.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 9 Dec 2022 10:19:29 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 9, 2022 at 8:20 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> In the meanwhile, I've been working on vacuum integration. There are\n> two things I'd like to discuss some time:\n>\n> The first is the minimum of maintenance_work_mem, 1 MB. Since the\n> initial DSA segment size is 1MB (DSA_INITIAL_SEGMENT_SIZE), parallel\n> vacuum with radix tree cannot work with the minimum\n> maintenance_work_mem. It will need to increase it to 4MB or so. Maybe\n> we can start a new thread for that.\n\nI don't think that'd be very controversial, but I'm also not sure why we'd\nneed 4MB -- can you explain in more detail what exactly we'd need so that\nthe feature would work? (The minimum doesn't have to work *well* IIUC, just\ndo some useful work and not fail).\n\n> The second is how to limit the size of the radix tree to\n> maintenance_work_mem. I think that it's tricky to estimate the maximum\n> number of keys in the radix tree that fit in maintenance_work_mem. The\n> radix tree size varies depending on the key distribution. The next\n> idea I considered was how to limit the size when inserting a key. In\n> order to strictly limit the radix tree size, probably we have to\n> change the rt_set so that it breaks off and returns false if the radix\n> tree size is about to exceed the memory limit when we allocate a new\n> node or grow a node kind/class.\n\nThat seems complex, fragile, and wrong scope.\n\n> Ideally, I'd like to control the size\n> outside of radix tree (e.g. 
TIDStore) since it could introduce\n> overhead to rt_set() but probably we need to add such logic in radix\n> tree.\n\nDoes the TIDStore have the ability to ask the DSA (or slab context) to see\nhow big it is? If a new segment has been allocated that brings us to the\nlimit, we can stop when we discover that fact. In the local case with slab\nblocks, it won't be on nice neat boundaries, but we could check if we're\nwithin the largest block size (~64kB) of overflow.\n\nRemember when we discussed how we might approach parallel pruning? I\nenvisioned a local array of a few dozen kilobytes to reduce contention on\nthe tidstore. We could use such an array even for a single worker (always\ndoing the same thing is simpler anyway). When the array fills up enough so\nthat the next heap page *could* overflow it: Stop, insert into the store,\nand check the store's memory usage before continuing.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Fri, Dec 9, 2022 at 8:20 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> In the meanwhile, I've been working on vacuum integration. There are> two things I'd like to discuss some time:>> The first is the minimum of maintenance_work_mem, 1 MB. Since the> initial DSA segment size is 1MB (DSA_INITIAL_SEGMENT_SIZE), parallel> vacuum with radix tree cannot work with the minimum> maintenance_work_mem. It will need to increase it to 4MB or so. Maybe> we can start a new thread for that.I don't think that'd be very controversial, but I'm also not sure why we'd need 4MB -- can you explain in more detail what exactly we'd need so that the feature would work? (The minimum doesn't have to work *well* IIUC, just do some useful work and not fail).> The second is how to limit the size of the radix tree to> maintenance_work_mem. I think that it's tricky to estimate the maximum> number of keys in the radix tree that fit in maintenance_work_mem. The> radix tree size varies depending on the key distribution. 
The next> idea I considered was how to limit the size when inserting a key. In> order to strictly limit the radix tree size, probably we have to> change the rt_set so that it breaks off and returns false if the radix> tree size is about to exceed the memory limit when we allocate a new> node or grow a node kind/class.That seems complex, fragile, and wrong scope.> Ideally, I'd like to control the size> outside of radix tree (e.g. TIDStore) since it could introduce> overhead to rt_set() but probably we need to add such logic in radix> tree.Does the TIDStore have the ability to ask the DSA (or slab context) to see how big it is? If a new segment has been allocated that brings us to the limit, we can stop when we discover that fact. In the local case with slab blocks, it won't be on nice neat boundaries, but we could check if we're within the largest block size (~64kB) of overflow.Remember when we discussed how we might approach parallel pruning? I envisioned a local array of a few dozen kilobytes to reduce contention on the tidstore. We could use such an array even for a single worker (always doing the same thing is simpler anyway). When the array fills up enough so that the next heap page *could* overflow it: Stop, insert into the store, and check the store's memory usage before continuing.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 9 Dec 2022 15:53:01 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 9, 2022 at 5:53 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Fri, Dec 9, 2022 at 8:20 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > In the meanwhile, I've been working on vacuum integration. There are\n> > two things I'd like to discuss some time:\n> >\n> > The first is the minimum of maintenance_work_mem, 1 MB. 
Since the\n> > initial DSA segment size is 1MB (DSA_INITIAL_SEGMENT_SIZE), parallel\n> > vacuum with radix tree cannot work with the minimum\n> > maintenance_work_mem. It will need to increase it to 4MB or so. Maybe\n> > we can start a new thread for that.\n>\n> I don't think that'd be very controversial, but I'm also not sure why we'd need 4MB -- can you explain in more detail what exactly we'd need so that the feature would work? (The minimum doesn't have to work *well* IIUC, just do some useful work and not fail).\n\nThe minimum requirement is 2MB. In PoC patch, TIDStore checks how big\nthe radix tree is using dsa_get_total_size(). If the size returned by\ndsa_get_total_size() (+ some memory used by TIDStore meta information)\nexceeds maintenance_work_mem, lazy vacuum starts to do index vacuum\nand heap vacuum. However, when allocating DSA memory for\nradix_tree_control at creation, we allocate 1MB\n(DSA_INITIAL_SEGMENT_SIZE) DSM memory and use memory required for\nradix_tree_control from it. das_get_total_size() returns 1MB even if\nthere is no TID collected.\n\n>\n> > The second is how to limit the size of the radix tree to\n> > maintenance_work_mem. I think that it's tricky to estimate the maximum\n> > number of keys in the radix tree that fit in maintenance_work_mem. The\n> > radix tree size varies depending on the key distribution. The next\n> > idea I considered was how to limit the size when inserting a key. In\n> > order to strictly limit the radix tree size, probably we have to\n> > change the rt_set so that it breaks off and returns false if the radix\n> > tree size is about to exceed the memory limit when we allocate a new\n> > node or grow a node kind/class.\n>\n> That seems complex, fragile, and wrong scope.\n>\n> > Ideally, I'd like to control the size\n> > outside of radix tree (e.g. 
TIDStore) since it could introduce\n> > overhead to rt_set() but probably we need to add such logic in radix\n> > tree.\n>\n> Does the TIDStore have the ability to ask the DSA (or slab context) to see how big it is?\n\nYes, TIDStore can check it using dsa_get_total_size().\n\n> If a new segment has been allocated that brings us to the limit, we can stop when we discover that fact. In the local case with slab blocks, it won't be on nice neat boundaries, but we could check if we're within the largest block size (~64kB) of overflow.\n>\n> Remember when we discussed how we might approach parallel pruning? I envisioned a local array of a few dozen kilobytes to reduce contention on the tidstore. We could use such an array even for a single worker (always doing the same thing is simpler anyway). When the array fills up enough so that the next heap page *could* overflow it: Stop, insert into the store, and check the store's memory usage before continuing.\n\nRight, I think it's no problem in slab cases. In DSA cases, the new\nsegment size follows a geometric series that approximately doubles the\ntotal storage each time we create a new segment. This behavior comes\nfrom the fact that the underlying DSM system isn't designed for large\nnumbers of segments.\n\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 9 Dec 2022 22:32:50 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 9, 2022 at 8:33 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Fri, Dec 9, 2022 at 5:53 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> >\n\n> > I don't think that'd be very controversial, but I'm also not sure why\nwe'd need 4MB -- can you explain in more detail what exactly we'd need so\nthat the feature would work? 
(The minimum doesn't have to work *well* IIUC,\njust do some useful work and not fail).\n>\n> The minimum requirement is 2MB. In PoC patch, TIDStore checks how big\n> the radix tree is using dsa_get_total_size(). If the size returned by\n> dsa_get_total_size() (+ some memory used by TIDStore meta information)\n> exceeds maintenance_work_mem, lazy vacuum starts to do index vacuum\n> and heap vacuum. However, when allocating DSA memory for\n> radix_tree_control at creation, we allocate 1MB\n> (DSA_INITIAL_SEGMENT_SIZE) DSM memory and use memory required for\n> radix_tree_control from it. das_get_total_size() returns 1MB even if\n> there is no TID collected.\n\n2MB makes sense.\n\nIf the metadata is small, it seems counterproductive to count it towards\nthe total. We want the decision to be driven by blocks allocated. I have an\nidea on that below.\n\n> > Remember when we discussed how we might approach parallel pruning? I\nenvisioned a local array of a few dozen kilobytes to reduce contention on\nthe tidstore. We could use such an array even for a single worker (always\ndoing the same thing is simpler anyway). When the array fills up enough so\nthat the next heap page *could* overflow it: Stop, insert into the store,\nand check the store's memory usage before continuing.\n>\n> Right, I think it's no problem in slab cases. In DSA cases, the new\n> segment size follows a geometric series that approximately doubles the\n> total storage each time we create a new segment. This behavior comes\n> from the fact that the underlying DSM system isn't designed for large\n> numbers of segments.\n\nAnd taking a look, the size of a new segment can get quite large. It seems\nwe could test if the total DSA area allocated is greater than half of\nmaintenance_work_mem. If that parameter is a power of two (common) and\n>=8MB, then the area will contain just under a power of two the last time\nit passes the test. 
The next segment will bring it to about 3/4 full, like\nthis:\n\nmaintenance work mem = 256MB, so stop if we go over 128MB:\n\n2*(1+2+4+8+16+32) = 126MB -> keep going\n126MB + 64 = 190MB -> stop\n\nThat would be a simple way to be conservative with the memory limit. The\nunfortunate aspect is that the last segment would be mostly wasted, but\nit's paradise compared to the pessimistically-sized single array we have\nnow (even with Peter G.'s VM snapshot informing the allocation size, I\nimagine).\n\nAnd as for minimum possible maintenance work mem, I think this would work\nwith 2MB, if the community is okay with technically going over the limit by\na few bytes of overhead if a buildfarm animal set to that value. I imagine\nit would never go over the limit for realistic (and even most unrealistic)\nvalues. Even with a VM snapshot page in memory and small local arrays of\nTIDs, I think with this scheme we'll be well under the limit.\n\nAfter this feature is complete, I think we should consider a follow-on\npatch to get rid of vacuum_work_mem, since it would no longer be needed.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Fri, Dec 9, 2022 at 8:33 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Fri, Dec 9, 2022 at 5:53 PM John Naylor <john.naylor@enterprisedb.com> wrote:> >> > I don't think that'd be very controversial, but I'm also not sure why we'd need 4MB -- can you explain in more detail what exactly we'd need so that the feature would work? (The minimum doesn't have to work *well* IIUC, just do some useful work and not fail).>> The minimum requirement is 2MB. In PoC patch, TIDStore checks how big> the radix tree is using dsa_get_total_size(). If the size returned by> dsa_get_total_size() (+ some memory used by TIDStore meta information)> exceeds maintenance_work_mem, lazy vacuum starts to do index vacuum> and heap vacuum. 
However, when allocating DSA memory for> radix_tree_control at creation, we allocate 1MB> (DSA_INITIAL_SEGMENT_SIZE) DSM memory and use memory required for> radix_tree_control from it. das_get_total_size() returns 1MB even if> there is no TID collected.2MB makes sense.If the metadata is small, it seems counterproductive to count it towards the total. We want the decision to be driven by blocks allocated. I have an idea on that below.> > Remember when we discussed how we might approach parallel pruning? I envisioned a local array of a few dozen kilobytes to reduce contention on the tidstore. We could use such an array even for a single worker (always doing the same thing is simpler anyway). When the array fills up enough so that the next heap page *could* overflow it: Stop, insert into the store, and check the store's memory usage before continuing.>> Right, I think it's no problem in slab cases. In DSA cases, the new> segment size follows a geometric series that approximately doubles the> total storage each time we create a new segment. This behavior comes> from the fact that the underlying DSM system isn't designed for large> numbers of segments.And taking a look, the size of a new segment can get quite large. It seems we could test if the total DSA area allocated is greater than half of maintenance_work_mem. If that parameter is a power of two (common) and >=8MB, then the area will contain just under a power of two the last time it passes the test. The next segment will bring it to about 3/4 full, like this:maintenance work mem = 256MB, so stop if we go over 128MB:2*(1+2+4+8+16+32) = 126MB -> keep going126MB + 64 = 190MB        -> stopThat would be a simple way to be conservative with the memory limit. 
The unfortunate aspect is that the last segment would be mostly wasted, but it's paradise compared to the pessimistically-sized single array we have now (even with Peter G.'s VM snapshot informing the allocation size, I imagine).And as for minimum possible maintenance work mem, I think this would work with 2MB, if the community is okay with technically going over the limit by a few bytes of overhead if a buildfarm animal set to that value. I imagine it would never go over the limit for realistic (and even most unrealistic) values. Even with a VM snapshot page in memory and small local arrays of TIDs, I think with this scheme we'll be well under the limit.After this feature is complete, I think we should consider a follow-on patch to get rid of vacuum_work_mem, since it would no longer be needed.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 12 Dec 2022 17:14:02 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Dec 12, 2022 at 7:14 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Fri, Dec 9, 2022 at 8:33 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Dec 9, 2022 at 5:53 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> > >\n>\n> > > I don't think that'd be very controversial, but I'm also not sure why we'd need 4MB -- can you explain in more detail what exactly we'd need so that the feature would work? (The minimum doesn't have to work *well* IIUC, just do some useful work and not fail).\n> >\n> > The minimum requirement is 2MB. In PoC patch, TIDStore checks how big\n> > the radix tree is using dsa_get_total_size(). If the size returned by\n> > dsa_get_total_size() (+ some memory used by TIDStore meta information)\n> > exceeds maintenance_work_mem, lazy vacuum starts to do index vacuum\n> > and heap vacuum. 
However, when allocating DSA memory for\n> > radix_tree_control at creation, we allocate 1MB\n> > (DSA_INITIAL_SEGMENT_SIZE) DSM memory and use memory required for\n> > radix_tree_control from it. das_get_total_size() returns 1MB even if\n> > there is no TID collected.\n>\n> 2MB makes sense.\n>\n> If the metadata is small, it seems counterproductive to count it towards the total. We want the decision to be driven by blocks allocated. I have an idea on that below.\n>\n> > > Remember when we discussed how we might approach parallel pruning? I envisioned a local array of a few dozen kilobytes to reduce contention on the tidstore. We could use such an array even for a single worker (always doing the same thing is simpler anyway). When the array fills up enough so that the next heap page *could* overflow it: Stop, insert into the store, and check the store's memory usage before continuing.\n> >\n> > Right, I think it's no problem in slab cases. In DSA cases, the new\n> > segment size follows a geometric series that approximately doubles the\n> > total storage each time we create a new segment. This behavior comes\n> > from the fact that the underlying DSM system isn't designed for large\n> > numbers of segments.\n>\n> And taking a look, the size of a new segment can get quite large. It seems we could test if the total DSA area allocated is greater than half of maintenance_work_mem. If that parameter is a power of two (common) and >=8MB, then the area will contain just under a power of two the last time it passes the test. The next segment will bring it to about 3/4 full, like this:\n>\n> maintenance work mem = 256MB, so stop if we go over 128MB:\n>\n> 2*(1+2+4+8+16+32) = 126MB -> keep going\n> 126MB + 64 = 190MB -> stop\n>\n> That would be a simple way to be conservative with the memory limit. 
The unfortunate aspect is that the last segment would be mostly wasted, but it's paradise compared to the pessimistically-sized single array we have now (even with Peter G.'s VM snapshot informing the allocation size, I imagine).\n\nRight. In this case, even if we allocate 64MB, we will use only 2088\nbytes at maximum. So I think the memory space used for vacuum is\npractically limited to half.\n\n>\n> And as for minimum possible maintenance work mem, I think this would work with 2MB, if the community is okay with technically going over the limit by a few bytes of overhead if a buildfarm animal set to that value. I imagine it would never go over the limit for realistic (and even most unrealistic) values. Even with a VM snapshot page in memory and small local arrays of TIDs, I think with this scheme we'll be well under the limit.\n\nLooking at other code using DSA such as tidbitmap.c and nodeHash.c, it\nseems that they look at only memory that are actually dsa_allocate'd.\nTo be exact, we estimate the number of hash buckets based on work_mem\n(and hash_mem_multiplier) and use it as the upper limit. So I've\nconfirmed that the result of dsa_get_total_size() could exceed the\nlimit. I'm not sure it's a known and legitimate usage. If we can\nfollow such usage, we can probably track how much dsa_allocate'd\nmemory is used in the radix tree. Templating whether or not to count\nthe memory usage might help avoid the overheads.\n\n> After this feature is complete, I think we should consider a follow-on patch to get rid of vacuum_work_mem, since it would no longer be needed.\n\nI think you meant autovacuum_work_mem. 
Yes, I also think we can get rid of it.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 13 Dec 2022 01:04:40 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Dec 13, 2022 at 1:04 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Dec 12, 2022 at 7:14 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> > On Fri, Dec 9, 2022 at 8:33 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Fri, Dec 9, 2022 at 5:53 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> > > >\n> >\n> > > > I don't think that'd be very controversial, but I'm also not sure why we'd need 4MB -- can you explain in more detail what exactly we'd need so that the feature would work? (The minimum doesn't have to work *well* IIUC, just do some useful work and not fail).\n> > >\n> > > The minimum requirement is 2MB. In PoC patch, TIDStore checks how big\n> > > the radix tree is using dsa_get_total_size(). If the size returned by\n> > > dsa_get_total_size() (+ some memory used by TIDStore meta information)\n> > > exceeds maintenance_work_mem, lazy vacuum starts to do index vacuum\n> > > and heap vacuum. However, when allocating DSA memory for\n> > > radix_tree_control at creation, we allocate 1MB\n> > > (DSA_INITIAL_SEGMENT_SIZE) DSM memory and use memory required for\n> > > radix_tree_control from it. das_get_total_size() returns 1MB even if\n> > > there is no TID collected.\n> >\n> > 2MB makes sense.\n> >\n> > If the metadata is small, it seems counterproductive to count it towards the total. We want the decision to be driven by blocks allocated. I have an idea on that below.\n> >\n> > > > Remember when we discussed how we might approach parallel pruning? I envisioned a local array of a few dozen kilobytes to reduce contention on the tidstore. 
We could use such an array even for a single worker (always doing the same thing is simpler anyway). When the array fills up enough so that the next heap page *could* overflow it: Stop, insert into the store, and check the store's memory usage before continuing.\n> > >\n> > > Right, I think it's no problem in slab cases. In DSA cases, the new\n> > > segment size follows a geometric series that approximately doubles the\n> > > total storage each time we create a new segment. This behavior comes\n> > > from the fact that the underlying DSM system isn't designed for large\n> > > numbers of segments.\n> >\n> > And taking a look, the size of a new segment can get quite large. It seems we could test if the total DSA area allocated is greater than half of maintenance_work_mem. If that parameter is a power of two (common) and >=8MB, then the area will contain just under a power of two the last time it passes the test. The next segment will bring it to about 3/4 full, like this:\n> >\n> > maintenance work mem = 256MB, so stop if we go over 128MB:\n> >\n> > 2*(1+2+4+8+16+32) = 126MB -> keep going\n> > 126MB + 64 = 190MB -> stop\n> >\n> > That would be a simple way to be conservative with the memory limit. The unfortunate aspect is that the last segment would be mostly wasted, but it's paradise compared to the pessimistically-sized single array we have now (even with Peter G.'s VM snapshot informing the allocation size, I imagine).\n>\n> Right. In this case, even if we allocate 64MB, we will use only 2088\n> bytes at maximum. So I think the memory space used for vacuum is\n> practically limited to half.\n>\n> >\n> > And as for minimum possible maintenance work mem, I think this would work with 2MB, if the community is okay with technically going over the limit by a few bytes of overhead if a buildfarm animal set to that value. I imagine it would never go over the limit for realistic (and even most unrealistic) values. 
Even with a VM snapshot page in memory and small local arrays of TIDs, I think with this scheme we'll be well under the limit.\n>\n> Looking at other code using DSA such as tidbitmap.c and nodeHash.c, it\n> seems that they look at only memory that are actually dsa_allocate'd.\n> To be exact, we estimate the number of hash buckets based on work_mem\n> (and hash_mem_multiplier) and use it as the upper limit. So I've\n> confirmed that the result of dsa_get_total_size() could exceed the\n> limit. I'm not sure it's a known and legitimate usage. If we can\n> follow such usage, we can probably track how much dsa_allocate'd\n> memory is used in the radix tree.\n\nI've experimented with this idea. The newly added 0008 patch changes\nthe radix tree so that it counts the memory usage for both local and\nshared cases. As shown below, there is an overhead for that:\n\nw/o 0008 patch\n\n=# select * from bench_load_random_int(1000000)\nNOTICE: num_keys = 1000000, height = 7, n4 = 4970924, n15 = 38277,\nn32 = 27205, n125 = 0, n256 = 257\n mem_allocated | load_ms\n---------------+---------\n 298453544 | 282\n(1 row)\n\nw/0 0008 patch\n\n=# select * from bench_load_random_int(1000000)\nNOTICE: num_keys = 1000000, height = 7, n4 = 4970924, n15 = 38277,\nn32 = 27205, n125 = 0, n256 = 257\n mem_allocated | load_ms\n---------------+---------\n 293603184 | 297\n(1 row)\n\nAlthough it adds some overhead, I think this idea is straightforward\nand the most practical for users. And it seems to be consistent with\nother components using DSA. 
We can improve this part in the future for\nbetter memory control, for example, by introducing slab-like DSA\nmemory management.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 19 Dec 2022 16:13:45 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Dec 19, 2022 at 4:13 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Tue, Dec 13, 2022 at 1:04 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Dec 12, 2022 at 7:14 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > >\n> > > On Fri, Dec 9, 2022 at 8:33 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > >\n> > > > On Fri, Dec 9, 2022 at 5:53 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> > > > >\n> > >\n> > > > > I don't think that'd be very controversial, but I'm also not sure why we'd need 4MB -- can you explain in more detail what exactly we'd need so that the feature would work? (The minimum doesn't have to work *well* IIUC, just do some useful work and not fail).\n> > > >\n> > > > The minimum requirement is 2MB. In PoC patch, TIDStore checks how big\n> > > > the radix tree is using dsa_get_total_size(). If the size returned by\n> > > > dsa_get_total_size() (+ some memory used by TIDStore meta information)\n> > > > exceeds maintenance_work_mem, lazy vacuum starts to do index vacuum\n> > > > and heap vacuum. However, when allocating DSA memory for\n> > > > radix_tree_control at creation, we allocate 1MB\n> > > > (DSA_INITIAL_SEGMENT_SIZE) DSM memory and use memory required for\n> > > > radix_tree_control from it. das_get_total_size() returns 1MB even if\n> > > > there is no TID collected.\n> > >\n> > > 2MB makes sense.\n> > >\n> > > If the metadata is small, it seems counterproductive to count it towards the total. We want the decision to be driven by blocks allocated. 
I have an idea on that below.\n> > >\n> > > > > Remember when we discussed how we might approach parallel pruning? I envisioned a local array of a few dozen kilobytes to reduce contention on the tidstore. We could use such an array even for a single worker (always doing the same thing is simpler anyway). When the array fills up enough so that the next heap page *could* overflow it: Stop, insert into the store, and check the store's memory usage before continuing.\n> > > >\n> > > > Right, I think it's no problem in slab cases. In DSA cases, the new\n> > > > segment size follows a geometric series that approximately doubles the\n> > > > total storage each time we create a new segment. This behavior comes\n> > > > from the fact that the underlying DSM system isn't designed for large\n> > > > numbers of segments.\n> > >\n> > > And taking a look, the size of a new segment can get quite large. It seems we could test if the total DSA area allocated is greater than half of maintenance_work_mem. If that parameter is a power of two (common) and >=8MB, then the area will contain just under a power of two the last time it passes the test. The next segment will bring it to about 3/4 full, like this:\n> > >\n> > > maintenance work mem = 256MB, so stop if we go over 128MB:\n> > >\n> > > 2*(1+2+4+8+16+32) = 126MB -> keep going\n> > > 126MB + 64 = 190MB -> stop\n> > >\n> > > That would be a simple way to be conservative with the memory limit. The unfortunate aspect is that the last segment would be mostly wasted, but it's paradise compared to the pessimistically-sized single array we have now (even with Peter G.'s VM snapshot informing the allocation size, I imagine).\n> >\n> > Right. In this case, even if we allocate 64MB, we will use only 2088\n> > bytes at maximum. 
So I think the memory space used for vacuum is\n> > practically limited to half.\n> >\n> > >\n> > > And as for minimum possible maintenance work mem, I think this would work with 2MB, if the community is okay with technically going over the limit by a few bytes of overhead if a buildfarm animal set to that value. I imagine it would never go over the limit for realistic (and even most unrealistic) values. Even with a VM snapshot page in memory and small local arrays of TIDs, I think with this scheme we'll be well under the limit.\n> >\n> > Looking at other code using DSA such as tidbitmap.c and nodeHash.c, it\n> > seems that they look at only memory that are actually dsa_allocate'd.\n> > To be exact, we estimate the number of hash buckets based on work_mem\n> > (and hash_mem_multiplier) and use it as the upper limit. So I've\n> > confirmed that the result of dsa_get_total_size() could exceed the\n> > limit. I'm not sure it's a known and legitimate usage. If we can\n> > follow such usage, we can probably track how much dsa_allocate'd\n> > memory is used in the radix tree.\n>\n> I've experimented with this idea. 
The newly added 0008 patch changes\n> the radix tree so that it counts the memory usage for both local and\n> shared cases.\n\nI've attached updated version patches to make cfbot happy.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 20 Dec 2022 14:03:58 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Dec 19, 2022 at 2:14 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Tue, Dec 13, 2022 at 1:04 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> > Looking at other code using DSA such as tidbitmap.c and nodeHash.c, it\n> > seems that they look at only memory that are actually dsa_allocate'd.\n> > To be exact, we estimate the number of hash buckets based on work_mem\n> > (and hash_mem_multiplier) and use it as the upper limit. So I've\n> > confirmed that the result of dsa_get_total_size() could exceed the\n> > limit. I'm not sure it's a known and legitimate usage. If we can\n> > follow such usage, we can probably track how much dsa_allocate'd\n> > memory is used in the radix tree.\n>\n> I've experimented with this idea. The newly added 0008 patch changes\n> the radix tree so that it counts the memory usage for both local and\n> shared cases. As shown below, there is an overhead for that:\n>\n> w/o 0008 patch\n> 298453544 | 282\n\n> w/0 0008 patch\n> 293603184 | 297\n\nThis adds about as much overhead as the improvement I measured in the v4\nslab allocator patch. That's not acceptable, and is exactly what Andres\nwarned about in\n\nhttps://www.postgresql.org/message-id/20220704211822.kfxtzpcdmslzm2dy%40awork3.anarazel.de\n\nI'm guessing the hash join case can afford to be precise about memory\nbecause it must spill to disk when exceeding workmem. 
We don't have that\ndesign constraint.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Dec 19, 2022 at 2:14 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Tue, Dec 13, 2022 at 1:04 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > Looking at other code using DSA such as tidbitmap.c and nodeHash.c, it> > seems that they look at only memory that are actually dsa_allocate'd.> > To be exact, we estimate the number of hash buckets based on work_mem> > (and hash_mem_multiplier) and use it as the upper limit. So I've> > confirmed that the result of dsa_get_total_size() could exceed the> > limit. I'm not sure it's a known and legitimate usage. If we can> > follow such usage, we can probably track how much dsa_allocate'd> > memory is used in the radix tree.>> I've experimented with this idea. The newly added 0008 patch changes> the radix tree so that it counts the memory usage for both local and> shared cases. As shown below, there is an overhead for that:>> w/o 0008 patch>      298453544 |     282> w/0 0008 patch>      293603184 |     297This adds about as much overhead as the improvement I measured in the v4 slab allocator patch. That's not acceptable, and is exactly what Andres warned about inhttps://www.postgresql.org/message-id/20220704211822.kfxtzpcdmslzm2dy%40awork3.anarazel.deI'm guessing the hash join case can afford to be precise about memory because it must spill to disk when exceeding workmem. 
We don't have that design constraint.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 20 Dec 2022 13:09:37 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Dec 20, 2022 at 3:09 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Mon, Dec 19, 2022 at 2:14 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Tue, Dec 13, 2022 at 1:04 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > Looking at other code using DSA such as tidbitmap.c and nodeHash.c, it\n> > > seems that they look at only memory that are actually dsa_allocate'd.\n> > > To be exact, we estimate the number of hash buckets based on work_mem\n> > > (and hash_mem_multiplier) and use it as the upper limit. So I've\n> > > confirmed that the result of dsa_get_total_size() could exceed the\n> > > limit. I'm not sure it's a known and legitimate usage. If we can\n> > > follow such usage, we can probably track how much dsa_allocate'd\n> > > memory is used in the radix tree.\n> >\n> > I've experimented with this idea. The newly added 0008 patch changes\n> > the radix tree so that it counts the memory usage for both local and\n> > shared cases. As shown below, there is an overhead for that:\n> >\n> > w/o 0008 patch\n> > 298453544 | 282\n>\n> > w/0 0008 patch\n> > 293603184 | 297\n>\n> This adds about as much overhead as the improvement I measured in the v4 slab allocator patch.\n\nOh, yes, that's bad.\n\n> https://www.postgresql.org/message-id/20220704211822.kfxtzpcdmslzm2dy%40awork3.anarazel.de\n>\n> I'm guessing the hash join case can afford to be precise about memory because it must spill to disk when exceeding workmem. We don't have that design constraint.\n\nYou mean that the memory used by the radix tree should be limited not\nby the amount of memory actually used, but by the amount of memory\nallocated? 
In other words, it checks by MomoryContextMemAllocated() in\nthe local cases and by dsa_get_total_size() in the shared case.\n\nThe idea of using up to half of maintenance_work_mem might be a good\nidea compared to the current flat-array solution. But since it only\nuses half, I'm concerned that there will be users who double their\nmaintenace_work_mem. When it is improved, the user needs to restore\nmaintenance_work_mem again.\n\nA better solution would be to have slab-like DSA. We allocate the\ndynamic shared memory by adding fixed-length large segments. However,\ndownside would be since the segment size gets large we need to\nincrease maintenance_work_mem as well. Also, this patch set is already\ngetting bigger and more complicated, I don't think it's a good idea to\nadd more.\n\nIf we limit the memory usage by checking the amount of memory actually\nused, we can use SlabStats() for the local cases. Since DSA doesn't\nhave such functionality for now we would need to add it. Or we can\ntrack it in the radix tree only in the shared cases.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 21 Dec 2022 17:09:16 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Dec 21, 2022 at 3:09 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Tue, Dec 20, 2022 at 3:09 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> >\nhttps://www.postgresql.org/message-id/20220704211822.kfxtzpcdmslzm2dy%40awork3.anarazel.de\n> >\n> > I'm guessing the hash join case can afford to be precise about memory\nbecause it must spill to disk when exceeding workmem. We don't have that\ndesign constraint.\n>\n> You mean that the memory used by the radix tree should be limited not\n> by the amount of memory actually used, but by the amount of memory\n> allocated? 
In other words, it checks by MomoryContextMemAllocated() in\n> the local cases and by dsa_get_total_size() in the shared case.\n\nI mean, if this patch set uses 10x less memory than v15 (not always, but\neasy to find cases where it does), and if it's also expensive to track\nmemory use precisely, then we don't have an incentive to track memory\nprecisely. Even if we did, we don't want to assume that every future caller\nof radix tree is willing to incur that cost.\n\n> The idea of using up to half of maintenance_work_mem might be a good\n> idea compared to the current flat-array solution. But since it only\n> uses half, I'm concerned that there will be users who double their\n> maintenace_work_mem. When it is improved, the user needs to restore\n> maintenance_work_mem again.\n\nI find it useful to step back and look at the usage patterns:\n\nAutovacuum: Limiting the memory allocated by vacuum is important, since\nthere are multiple workers and they can run at any time (possibly most of\nthe time). This case will not use parallel index vacuum, so will use slab,\nwhere the quick estimation of memory taken by the context is not terribly\nfar off, so we can afford to be more optimistic here.\n\nManual vacuum: The default configuration assumes we want to finish as soon\nas possible (vacuum_cost_delay is zero). 
Parallel index vacuum can be used.\nMy experience leads me to believe users are willing to use a lot of memory\nto make manual vacuum finish as quickly as possible, and are disappointed\nto learn that even if maintenance work mem is 10GB, vacuum can only use 1GB.\n\nSo I don't believe anyone will have to double maintenance work mem after\nupgrading (even with pessimistic accounting) because we'll be both\n- much more efficient with memory on average\n- free from the 1GB cap\n\nThat said, it's possible 50% is too pessimistic -- a 75% threshold will\nbring us very close to powers of two for example:\n\n2*(1+2+4+8+16+32+64+128) + 256 = 766MB (74.8% of 1GB) -> keep going\n766 + 256 = 1022MB -> stop\n\nI'm not sure if that calculation could cause going over the limit, or how\ncommon that would be.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Dec 21, 2022 at 3:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Tue, Dec 20, 2022 at 3:09 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> > https://www.postgresql.org/message-id/20220704211822.kfxtzpcdmslzm2dy%40awork3.anarazel.de> >> > I'm guessing the hash join case can afford to be precise about memory because it must spill to disk when exceeding workmem. We don't have that design constraint.>> You mean that the memory used by the radix tree should be limited not> by the amount of memory actually used, but by the amount of memory> allocated? In other words, it checks by MomoryContextMemAllocated() in> the local cases and by dsa_get_total_size() in the shared case.I mean, if this patch set uses 10x less memory than v15 (not always, but easy to find cases where it does), and if it's also expensive to track memory use precisely, then we don't have an incentive to track memory precisely. 
Even if we did, we don't want to assume that every future caller of radix tree is willing to incur that cost.> The idea of using up to half of maintenance_work_mem might be a good> idea compared to the current flat-array solution. But since it only> uses half, I'm concerned that there will be users who double their> maintenace_work_mem. When it is improved, the user needs to restore> maintenance_work_mem again.I find it useful to step back and look at the usage patterns:Autovacuum: Limiting the memory allocated by vacuum is important, since there are multiple workers and they can run at any time (possibly most of the time). This case will not use parallel index vacuum, so will use slab, where the quick estimation of memory taken by the context is not terribly far off, so we can afford to be more optimistic here.Manual vacuum: The default configuration assumes we want to finish as soon as possible (vacuum_cost_delay is zero). Parallel index vacuum can be used. My experience leads me to believe users are willing to use a lot of memory to make manual vacuum finish as quickly as possible, and are disappointed to learn that even if maintenance work mem is 10GB, vacuum can only use 1GB.So I don't believe anyone will have to double maintenance work mem after upgrading (even with pessimistic accounting) because we'll be both- much more efficient with memory on average- free from the 1GB capThat said, it's possible 50% is too pessimistic -- a 75% threshold will bring us very close to powers of two for example:2*(1+2+4+8+16+32+64+128) + 256 = 766MB (74.8% of 1GB) -> keep going766 + 256 = 1022MB -> stopI'm not sure if that calculation could cause going over the limit, or how common that would be.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 22 Dec 2022 17:24:16 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 22, 
2022 at 7:24 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Wed, Dec 21, 2022 at 3:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Tue, Dec 20, 2022 at 3:09 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> > > https://www.postgresql.org/message-id/20220704211822.kfxtzpcdmslzm2dy%40awork3.anarazel.de\n> > >\n> > > I'm guessing the hash join case can afford to be precise about memory because it must spill to disk when exceeding workmem. We don't have that design constraint.\n> >\n> > You mean that the memory used by the radix tree should be limited not\n> > by the amount of memory actually used, but by the amount of memory\n> > allocated? In other words, it checks by MomoryContextMemAllocated() in\n> > the local cases and by dsa_get_total_size() in the shared case.\n>\n> I mean, if this patch set uses 10x less memory than v15 (not always, but easy to find cases where it does), and if it's also expensive to track memory use precisely, then we don't have an incentive to track memory precisely. Even if we did, we don't want to assume that every future caller of radix tree is willing to incur that cost.\n\nUnderstood.\n\n>\n> > The idea of using up to half of maintenance_work_mem might be a good\n> > idea compared to the current flat-array solution. But since it only\n> > uses half, I'm concerned that there will be users who double their\n> > maintenace_work_mem. When it is improved, the user needs to restore\n> > maintenance_work_mem again.\n>\n> I find it useful to step back and look at the usage patterns:\n>\n> Autovacuum: Limiting the memory allocated by vacuum is important, since there are multiple workers and they can run at any time (possibly most of the time). 
This case will not use parallel index vacuum, so will use slab, where the quick estimation of memory taken by the context is not terribly far off, so we can afford to be more optimistic here.\n>\n> Manual vacuum: The default configuration assumes we want to finish as soon as possible (vacuum_cost_delay is zero). Parallel index vacuum can be used. My experience leads me to believe users are willing to use a lot of memory to make manual vacuum finish as quickly as possible, and are disappointed to learn that even if maintenance work mem is 10GB, vacuum can only use 1GB.\n\nAgreed.\n\n> So I don't believe anyone will have to double maintenance work mem after upgrading (even with pessimistic accounting) because we'll be both\n> - much more efficient with memory on average\n> - free from the 1GB cap\n\nMake sense.\n\n>\n> That said, it's possible 50% is too pessimistic -- a 75% threshold will bring us very close to powers of two for example:\n>\n> 2*(1+2+4+8+16+32+64+128) + 256 = 766MB (74.8% of 1GB) -> keep going\n> 766 + 256 = 1022MB -> stop\n>\n> I'm not sure if that calculation could cause going over the limit, or how common that would be.\n>\n\nIf the value is a power of 2, it seems to work perfectly fine. But for\nexample if it's 700MB, the total memory exceeds the limit:\n\n2*(1+2+4+8+16+32+64+128) = 510MB (72.8% of 700MB) -> keep going\n510 + 256 = 766MB -> stop but it exceeds the limit.\n\nIn a more bigger case, if it's 11000MB,\n\n2*(1+2+...+2048) = 8190MB (74.4%)\n8190 + 4096 = 12286MB\n\nThat being said, I don't think they are not common cases. 
So the 75%\nthreshold seems to work fine in most cases.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 22 Dec 2022 23:59:22 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 22, 2022 at 10:00 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> If the value is a power of 2, it seems to work perfectly fine. But for\n> example if it's 700MB, the total memory exceeds the limit:\n>\n> 2*(1+2+4+8+16+32+64+128) = 510MB (72.8% of 700MB) -> keep going\n> 510 + 256 = 766MB -> stop but it exceeds the limit.\n>\n> In a more bigger case, if it's 11000MB,\n>\n> 2*(1+2+...+2048) = 8190MB (74.4%)\n> 8190 + 4096 = 12286MB\n>\n> That being said, I don't think they are not common cases. So the 75%\n> threshold seems to work fine in most cases.\n\nThinking some more, I agree this doesn't have large practical risk, but\nthinking from the point of view of the community, being loose with memory\nlimits by up to 10% is not a good precedent.\n\nPerhaps we can be clever and use 75% when the limit is a power of two and\n50% otherwise. I'm skeptical of trying to be clever, and I just thought of\nan additional concern: We're assuming behavior of the growth in size of new\nDSA segments, which could possibly change. Given how allocators are\ntypically coded, though, it seems safe to assume that they'll at most\ndouble in size.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Dec 22, 2022 at 10:00 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> If the value is a power of 2, it seems to work perfectly fine. 
But for> example if it's 700MB, the total memory exceeds the limit:>> 2*(1+2+4+8+16+32+64+128) = 510MB (72.8% of 700MB) -> keep going> 510 + 256 = 766MB -> stop but it exceeds the limit.>> In a more bigger case, if it's 11000MB,>> 2*(1+2+...+2048) = 8190MB (74.4%)> 8190 + 4096 = 12286MB>> That being said, I don't think they are not common cases. So the 75%> threshold seems to work fine in most cases.Thinking some more, I agree this doesn't have large practical risk, but thinking from the point of view of the community, being loose with memory limits by up to 10% is not a good precedent.Perhaps we can be clever and use 75% when the limit is a power of two and 50% otherwise. I'm skeptical of trying to be clever, and I just thought of an additional concern: We're assuming behavior of the growth in size of new DSA segments, which could possibly change. Given how allocators are typically coded, though, it seems safe to assume that they'll at most double in size.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 23 Dec 2022 14:33:18 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I wrote:\n\n> - Try templating out the differences between local and shared memory.\n\nHere is a brief progress report before Christmas vacation.\n\nI thought the best way to approach this was to go \"inside out\", that is,\nstart with the modest goal of reducing duplicated code for v16.\n\n0001-0005 are copies from v13.\n\n0006 whacks around the rt_node_insert_inner function to reduce the \"surface\narea\" as far as symbols and casts. 
This includes replacing the goto with an\nextra \"unlikely\" branch.\n\n0007 removes the STRICT pragma for one of our benchmark functions that\ncrept in somewhere -- it should use the default and not just return NULL\ninstantly.\n\n0008 further whacks around the node-growing code in rt_node_insert_inner to\nremove casts. When growing the size class within the same kind, we have no\nneed for a \"new32\" (etc) variable. Also, to keep from getting confused\nabout what an assert build verifies at the end, add a \"newnode\" variable\nand assign it to \"node\" as soon as possible.\n\n0009 uses the bitmap logic from 0004 for node256 also. There is no\nperformance reason for this, because there is no iteration needed, but it's\ngood for simplicity and consistency.\n\n0010 and 0011 template a common implementation for both leaf and inner\nnodes for searching and inserting.\n\n0012: While at it, I couldn't resist using this technique to separate out\ndelete from search, which makes sense and might give a small performance\nboost (at least on less capable hardware). I haven't got to the iteration\nfunctions, but they should be straightforward.\n\nThere is more that could be done here, but I didn't want to get too ahead\nof myself. For example, it's possible that struct members \"children\" and\n\"values\" are names that don't need to be distinguished. 
Making them the\nsame would reduce code like\n\n+#ifdef RT_NODE_LEVEL_LEAF\n+ n32->values[insertpos] = value;\n+#else\n+ n32->children[insertpos] = child;\n+#endif\n\n...but there could be downsides and I don't want to distract from the goal\nof dealing with shared memory.\n\nThe tests pass, but it's not impossible that there is a new bug somewhere.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 23 Dec 2022 18:47:08 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 23, 2022 at 8:47 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> I wrote:\n>\n> > - Try templating out the differences between local and shared memory.\n>\n> Here is a brief progress report before Christmas vacation.\n\nThanks!\n\n>\n> I thought the best way to approach this was to go \"inside out\", that is, start with the modest goal of reducing duplicated code for v16.\n>\n> 0001-0005 are copies from v13.\n>\n> 0006 whacks around the rt_node_insert_inner function to reduce the \"surface area\" as far as symbols and casts. This includes replacing the goto with an extra \"unlikely\" branch.\n>\n> 0007 removes the STRICT pragma for one of our benchmark functions that crept in somewhere -- it should use the default and not just return NULL instantly.\n>\n> 0008 further whacks around the node-growing code in rt_node_insert_inner to remove casts. When growing the size class within the same kind, we have no need for a \"new32\" (etc) variable. Also, to keep from getting confused about what an assert build verifies at the end, add a \"newnode\" variable and assign it to \"node\" as soon as possible.\n>\n> 0009 uses the bitmap logic from 0004 for node256 also. There is no performance reason for this, because there is no iteration needed, but it's good for simplicity and consistency.\n\nThese 4 patches make sense to me. 
We can merge them into 0002 patch\nand I'll do similar changes for functions for leaf nodes as well.\n\n> 0010 and 0011 template a common implementation for both leaf and inner nodes for searching and inserting.\n>\n> 0012: While at it, I couldn't resist using this technique to separate out delete from search, which makes sense and might give a small performance boost (at least on less capable hardware). I haven't got to the iteration functions, but they should be straightforward.\n\nCool!\n\n>\n> There is more that could be done here, but I didn't want to get too ahead of myself. For example, it's possible that struct members \"children\" and \"values\" are names that don't need to be distinguished. Making them the same would reduce code like\n>\n> +#ifdef RT_NODE_LEVEL_LEAF\n> + n32->values[insertpos] = value;\n> +#else\n> + n32->children[insertpos] = child;\n> +#endif\n>\n> ...but there could be downsides and I don't want to distract from the goal of dealing with shared memory.\n\nWith these patches, some functions in radixtree.h load the header\nfiles, radixtree_xxx_impl.h, that have the function body. What do you\nthink about how we can expand this template method to deal with DSA\nmemory? I imagined that we load say radixtree_template.h with some\nmacros to use the radix tree like we do for simplehash.h. 
And\nradixtree_template.h further loads xxx_impl.h files for some internal\nfunctions.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 27 Dec 2022 02:13:43 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Dec 27, 2022 at 12:14 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Fri, Dec 23, 2022 at 8:47 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> These 4 patches make sense to me.We can merge them into 0002 patch\n\nOkay, then I'll squash them when I post my next patch.\n\n> and I'll do similar changes for functions for leaf nodes as well.\n\nI assume you meant something else? -- some of the differences between inner\nand leaf are already abstracted away.\n\nIn any case, some things are still half-baked, so please wait until my next\npatch before doing work on these files.\n\nAlso, CI found a bug on 32-bit -- I know what I missed and will fix next\nweek.\n\n> > 0010 and 0011 template a common implementation for both leaf and inner\nnodes for searching and inserting.\n> >\n> > 0012: While at it, I couldn't resist using this technique to separate\nout delete from search, which makes sense and might give a small\nperformance boost (at least on less capable hardware). I haven't got to the\niteration functions, but they should be straightforward.\n\nTwo things came to mind since I posted this, which I'll make clear next\npatch:\n- A good compiler will get rid of branches when inlining, so maybe no\ndifference in code generation, but it still looks nicer this way.\n- Delete should really use its own template, because it only _accidentally_\nlooks like search because we don't yet shrink nodes.\n\n> What do you\n> think about how we can expand this template method to deal with DSA\n> memory? 
I imagined that we load say radixtree_template.h with some\n> macros to use the radix tree like we do for simplehash.h. And\n> radixtree_template.h further loads xxx_impl.h files for some internal\n> functions.\n\nRight, I was thinking the same. I wanted to start small and look for\nopportunities to shrink the code footprint.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Tue, Dec 27, 2022 at 12:14 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Fri, Dec 23, 2022 at 8:47 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> These 4 patches make sense to me.We can merge them into 0002 patchOkay, then I'll squash them when I post my next patch.> and I'll do similar changes for functions for leaf nodes as well.I assume you meant something else? -- some of the differences between inner and leaf are already abstracted away. In any case, some things are still half-baked, so please wait until my next patch before doing work on these files.Also, CI found a bug on 32-bit -- I know what I missed and will fix next week.> > 0010 and 0011 template a common implementation for both leaf and inner nodes for searching and inserting.> >> > 0012: While at it, I couldn't resist using this technique to separate out delete from search, which makes sense and might give a small performance boost (at least on less capable hardware). I haven't got to the iteration functions, but they should be straightforward.Two things came to mind since I posted this, which I'll make clear next patch:- A good compiler will get rid of branches when inlining, so maybe no difference in code generation, but it still looks nicer this way.- Delete should really use its own template, because it only _accidentally_ looks like search because we don't yet shrink nodes.> What do you> think about how we can expand this template method to deal with DSA> memory? I imagined that we load say radixtree_template.h with some> macros to use the radix tree like we do for simplehash.h. 
And> radixtree_template.h further loads xxx_impl.h files for some internal> functions.Right, I was thinking the same. I wanted to start small and look for opportunities to shrink the code footprint.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 27 Dec 2022 12:24:18 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Dec 27, 2022 at 2:24 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Tue, Dec 27, 2022 at 12:14 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Dec 23, 2022 at 8:47 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> > These 4 patches make sense to me.We can merge them into 0002 patch\n>\n> Okay, then I'll squash them when I post my next patch.\n>\n> > and I'll do similar changes for functions for leaf nodes as well.\n>\n> I assume you meant something else? -- some of the differences between inner and leaf are already abstracted away.\n\nRight. If we template these routines I don't need that.\n\n>\n> In any case, some things are still half-baked, so please wait until my next patch before doing work on these files.\n>\n> Also, CI found a bug on 32-bit -- I know what I missed and will fix next week.\n\nThanks!\n\n>\n> > > 0010 and 0011 template a common implementation for both leaf and inner nodes for searching and inserting.\n> > >\n> > > 0012: While at it, I couldn't resist using this technique to separate out delete from search, which makes sense and might give a small performance boost (at least on less capable hardware). 
I haven't got to the iteration functions, but they should be straightforward.\n>\n> Two things came to mind since I posted this, which I'll make clear next patch:\n> - A good compiler will get rid of branches when inlining, so maybe no difference in code generation, but it still looks nicer this way.\n> - Delete should really use its own template, because it only _accidentally_ looks like search because we don't yet shrink nodes.\n\nOkay.\n\n>\n> > What do you\n> > think about how we can expand this template method to deal with DSA\n> > memory? I imagined that we load say radixtree_template.h with some\n> > macros to use the radix tree like we do for simplehash.h. And\n> > radixtree_template.h further loads xxx_impl.h files for some internal\n> > functions.\n>\n> Right, I was thinking the same. I wanted to start small and look for opportunities to shrink the code footprint.\n\nThank you for your confirmation!\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 27 Dec 2022 15:39:02 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "> [working on templating]\n\nIn the end, I decided to base my effort on v8, and not v12 (based on one of\nmy less-well-thought-out ideas). The latter was a good experiment, but it\ndid not lead to an increase in readability as I had hoped. The attached v17\nis still rough, but it's in good enough shape to evaluate a mostly-complete\ntemplating implementation.\n\nPart of what I didn't like about v8 was distinctions like \"node\" vs\n\"nodep\", which hinder readability. I've used \"allocnode\" for some cases\nwhere it makes sense, which is translated to \"newnode\" for the local\npointer. Some places I just gave up and used \"nodep\" for parameters like in\nv8, just to get it done. 
We can revisit naming later.\n\nNot done yet:\n\n- get_handle() is not implemented\n- rt_attach is defined but unused\n- grow_node_kind() was hackishly removed, but could be turned into a macro\n(or function that writes to 2 pointers)\n- node_update_inner() is back, now that we can share a template with\n\"search\". Seems easier to read, and I suspect this is easier for the\ncompiler.\n- the value type should really be a template macro, but is still hard-coded\nto uint64\n- I think it's okay if the key is hard coded for PG16: If some use case\nneeds more than uint64, we could consider \"single-value leaves\" with varlen\nkeys as a template option.\n- benchmark tests not updated\n\nv13-0007 had some changes to the regression tests, but I haven't included\nthose. The tests from v13-0003 do pass, both locally and shared. I quickly\nhacked together changing shared/local tests by hand (need to recompile),\nbut it would be good for maintainability if tests could run once each with\nlocal and shmem, but use the same \"expected\" test output.\n\nAlso, I didn't look to see if there were any changes in v14/15 that didn't\nhave to do with precise memory accounting.\n\nAt this point, Masahiko, I'd appreciate your feedback on whether this is an\nimprovement at all (or at least a good base for improvement), especially\nfor integrating with the TID store. I think there are some advantages to\nthe template approach. One possible disadvantage is needing separate\nfunctions for each local and shared memory.\n\nIf we go this route, I do think the TID store should invoke the template as\nstatic functions. I'm not quite comfortable with a global function that may\nnot fit well with future use cases.\n\nOne review point I'll mention: Somehow I didn't notice there is no use for\nthe \"chunk\" field in the rt_node type -- it's only set to zero and copied\nwhen growing. What is the purpose? 
Removing it would allow the\nsmallest node to take up only 32 bytes with a fanout of 3, by eliminating\npadding.\n\nAlso, v17-0005 has an optimization/simplification for growing into node125\n(my version needs an assertion or fallback, but works well now), found by\nanother reading of Andres' prototype There is a lot of good engineering\nthere, we should try to preserve it.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 9 Jan 2023 15:59:04 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 9, 2023 at 5:59 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> > [working on templating]\n>\n> In the end, I decided to base my effort on v8, and not v12 (based on one of my less-well-thought-out ideas). The latter was a good experiment, but it did not lead to an increase in readability as I had hoped. The attached v17 is still rough, but it's in good enough shape to evaluate a mostly-complete templating implementation.\n\nI really appreciate your work!\n\n>\n> v13-0007 had some changes to the regression tests, but I haven't included those. The tests from v13-0003 do pass, both locally and shared. I quickly hacked together changing shared/local tests by hand (need to recompile), but it would be good for maintainability if tests could run once each with local and shmem, but use the same \"expected\" test output.\n\nAgreed.\n\n> Also, I didn't look to see if there were any changes in v14/15 that didn't have to do with precise memory accounting.\n>\n> At this point, Masahiko, I'd appreciate your feedback on whether this is an improvement at all (or at least a good base for improvement), especially for integrating with the TID store. I think there are some advantages to the template approach. 
One possible disadvantage is needing separate functions for each local and shared memory.\n>\n> If we go this route, I do think the TID store should invoke the template as static functions. I'm not quite comfortable with a global function that may not fit well with future use cases.\n\nIt looks no problem in terms of vacuum integration, although I've not\nfully tested yet. TID store uses the radix tree as the main storage,\nand with the template radix tree, the data types for shared and\nnon-shared will be different. TID store can have an union for the\nradix tree and the structure would be like follows:\n\n/* Per-backend state for a TidStore */\nstruct TidStore\n{\n /*\n * Control object. This is allocated in DSA area 'area' in the shared\n * case, otherwise in backend-local memory.\n */\n TidStoreControl *control;\n\n /* Storage for Tids */\n union tree\n {\n local_radix_tree *local;\n shared_radix_tree *shared;\n };\n\n /* DSA area for TidStore if used */\n dsa_area *area;\n};\n\nIn the functions of TID store, we need to call either local or shared\nradix tree functions depending on whether TID store is shared or not.\nWe need if-branch for each key-value pair insertion, but I think it\nwould not be a big performance problem in TID store use cases, since\nvacuum is an I/O intensive operation in many cases. Overall, I think\nthere is no problem and I'll investigate it in depth.\n\nApart from that, I've been considering the lock support for shared\nradix tree. As we discussed before, the current usage (i.e, only\nparallel index vacuum) doesn't require locking support at all, so it\nwould be enough to have a single lock for simplicity. If we want to\nuse the shared radix tree for other use cases such as the parallel\nheap vacuum or the replacement of the hash table for shared buffers,\nwe would need better lock support. For example, if we want to support\nOptimistic Lock Coupling[1], we would need to change not only the node\nstructure but also the logic. 
Which probably leads to widen the gap\nbetween the code for non-shared and shared radix tree. In this case,\nonce we have a better radix tree optimized for shared case, perhaps we\ncan replace the templated shared radix tree with it. I'd like to hear\nyour opinion on this line.\n\n>\n> One review point I'll mention: Somehow I didn't notice there is no use for the \"chunk\" field in the rt_node type -- it's only set to zero and copied when growing. What is the purpose? Removing it would allow the smallest node to take up only 32 bytes with a fanout of 3, by eliminating padding.\n\nOh, I didn't notice that. The chunk field was originally used when\nredirecting the child pointer in the parent node from old to new\n(grown) node. When redirecting the pointer, since the corresponding\nchunk surely exists on the parent we can skip existence checks.\nCurrently we use RT_NODE_UPDATE_INNER() for that (see\nRT_REPLACE_NODE()) but having a dedicated function to update the\nexisting chunk and child pointer might improve the performance. Or\nreducing the node size by getting rid of the chunk field might be\nbetter.\n\n> Also, v17-0005 has an optimization/simplification for growing into node125 (my version needs an assertion or fallback, but works well now), found by another reading of Andres' prototype There is a lot of good engineering there, we should try to preserve it.\n\nAgreed.\n\nRegards,\n\n[1] https://db.in.tum.de/~leis/papers/artsync.pdf\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 10 Jan 2023 21:07:41 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 10, 2023 at 7:08 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> It looks no problem in terms of vacuum integration, although I've not\n> fully tested yet. 
TID store uses the radix tree as the main storage,\n> and with the template radix tree, the data types for shared and\n> non-shared will be different. TID store can have an union for the\n> radix tree and the structure would be like follows:\n\n> /* Storage for Tids */\n> union tree\n> {\n> local_radix_tree *local;\n> shared_radix_tree *shared;\n> };\n\nWe could possibly go back to using a common data type for this, but with\nunused fields in each setting, as before. We would have to be more careful\nof things like the 32-bit crash from a few weeks ago.\n\n> In the functions of TID store, we need to call either local or shared\n> radix tree functions depending on whether TID store is shared or not.\n> We need if-branch for each key-value pair insertion, but I think it\n> would not be a big performance problem in TID store use cases, since\n> vacuum is an I/O intensive operation in many cases.\n\nAlso, the branch will be easily predicted. That was still true in earlier\npatches, but with many more branches and fatter code paths.\n\n> Overall, I think\n> there is no problem and I'll investigate it in depth.\n\nOkay, great. If the separate-functions approach turns out to be ugly, we\ncan always go back to the branching approach for shared memory. I think\nwe'll want to keep this as a template overall, at least to allow different\nvalue types and to ease adding variable-length keys if someone finds a need.\n\n> Apart from that, I've been considering the lock support for shared\n> radix tree. 
As we discussed before, the current usage (i.e, only\n> parallel index vacuum) doesn't require locking support at all, so it\n> would be enough to have a single lock for simplicity.\n\nRight, that should be enough for PG16.\n\n> If we want to\n> use the shared radix tree for other use cases such as the parallel\n> heap vacuum or the replacement of the hash table for shared buffers,\n> we would need better lock support.\n\nFor future parallel pruning, I still think a global lock is \"probably\" fine\nif the workers buffer in local arrays. Highly concurrent applications will\nneed additional work, of course.\n\n> For example, if we want to support\n> Optimistic Lock Coupling[1],\n\nInteresting, from the same authors!\n\n> we would need to change not only the node\n> structure but also the logic. Which probably leads to widen the gap\n> between the code for non-shared and shared radix tree. In this case,\n> once we have a better radix tree optimized for shared case, perhaps we\n> can replace the templated shared radix tree with it. I'd like to hear\n> your opinion on this line.\n\nI'm not in a position to speculate on how best to do scalable concurrency,\nmuch less how it should coexist with the local implementation. It's\ninteresting that their \"ROWEX\" scheme gives up maintaining order in the\nlinear nodes.\n\n> > One review point I'll mention: Somehow I didn't notice there is no use\nfor the \"chunk\" field in the rt_node type -- it's only set to zero and\ncopied when growing. What is the purpose? Removing it would allow the\nsmallest node to take up only 32 bytes with a fanout of 3, by eliminating\npadding.\n>\n> Oh, I didn't notice that. The chunk field was originally used when\n> redirecting the child pointer in the parent node from old to new\n> (grown) node. 
When redirecting the pointer, since the corresponding\n> chunk surely exists on the parent we can skip existence checks.\n> Currently we use RT_NODE_UPDATE_INNER() for that (see\n> RT_REPLACE_NODE()) but having a dedicated function to update the\n> existing chunk and child pointer might improve the performance. Or\n> reducing the node size by getting rid of the chunk field might be\n> better.\n\nI see. IIUC from a brief re-reading of the code, saving that chunk would\nonly save us from re-loading \"parent->shift\" from L1 cache and shifting the\nkey. The cycles spent doing that seem small compared to the rest of the\nwork involved in growing a node. Expressions like \"if (idx < 0) return\nfalse;\" return to an asserts-only variable, so in production builds, I\nwould hope that branch gets elided (I haven't checked).\n\nI'm quite keen on making the smallest node padding-free, (since we don't\nyet have path compression or lazy path expansion), and this seems the way\nto get there.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Tue, Jan 10, 2023 at 7:08 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> It looks no problem in terms of vacuum integration, although I've not> fully tested yet. TID store uses the radix tree as the main storage,> and with the template radix tree, the data types for shared and> non-shared will be different. TID store can have an union for the> radix tree and the structure would be like follows:>     /* Storage for Tids */>     union tree>     {>         local_radix_tree    *local;>         shared_radix_tree   *shared;>     };We could possibly go back to using a common data type for this, but with unused fields in each setting, as before. 
We would have to be more careful of things like the 32-bit crash from a few weeks ago.> In the functions of TID store, we need to call either local or shared> radix tree functions depending on whether TID store is shared or not.> We need if-branch for each key-value pair insertion, but I think it> would not be a big performance problem in TID store use cases, since> vacuum is an I/O intensive operation in many cases.Also, the branch will be easily predicted. That was still true in earlier patches, but with many more branches and fatter code paths.> Overall, I think> there is no problem and I'll investigate it in depth.Okay, great. If the separate-functions approach turns out to be ugly, we can always go back to the branching approach for shared memory. I think we'll want to keep this as a template overall, at least to allow different value types and to ease adding variable-length keys if someone finds a need.> Apart from that, I've been considering the lock support for shared> radix tree. As we discussed before, the current usage (i.e, only> parallel index vacuum) doesn't require locking support at all, so it> would be enough to have a single lock for simplicity.Right, that should be enough for PG16.> If we want to> use the shared radix tree for other use cases such as the parallel> heap vacuum or the replacement of the hash table for shared buffers,> we would need better lock support.For future parallel pruning, I still think a global lock is \"probably\" fine if the workers buffer in local arrays. Highly concurrent applications will need additional work, of course.> For example, if we want to support> Optimistic Lock Coupling[1], Interesting, from the same authors!> we would need to change not only the node> structure but also the logic. Which probably leads to widen the gap> between the code for non-shared and shared radix tree. 
In this case,> once we have a better radix tree optimized for shared case, perhaps we> can replace the templated shared radix tree with it. I'd like to hear> your opinion on this line.I'm not in a position to speculate on how best to do scalable concurrency, much less how it should coexist with the local implementation. It's interesting that their \"ROWEX\" scheme gives up maintaining order in the linear nodes.> > One review point I'll mention: Somehow I didn't notice there is no use for the \"chunk\" field in the rt_node type -- it's only set to zero and copied when growing. What is the purpose? Removing it would allow the smallest node to take up only 32 bytes with a fanout of 3, by eliminating padding.>> Oh, I didn't notice that. The chunk field was originally used when> redirecting the child pointer in the parent node from old to new> (grown) node. When redirecting the pointer, since the corresponding> chunk surely exists on the parent we can skip existence checks.> Currently we use RT_NODE_UPDATE_INNER() for that (see> RT_REPLACE_NODE()) but having a dedicated function to update the> existing chunk and child pointer might improve the performance. Or> reducing the node size by getting rid of the chunk field might be> better.I see. IIUC from a brief re-reading of the code, saving that chunk would only save us from re-loading \"parent->shift\" from L1 cache and shifting the key. The cycles spent doing that seem small compared to the rest of the work involved in growing a node. 
Expressions like \"if (idx < 0) return false;\" return to an asserts-only variable, so in production builds, I would hope that branch gets elided (I haven't checked).I'm quite keen on making the smallest node padding-free, (since we don't yet have path compression or lazy path expansion), and this seems the way to get there.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 11 Jan 2023 10:12:54 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I wrote:\n\n> I see. IIUC from a brief re-reading of the code, saving that chunk would\nonly save us from re-loading \"parent->shift\" from L1 cache and shifting the\nkey. The cycles spent doing that seem small compared to the rest of the\nwork involved in growing a node. Expressions like \"if (idx < 0) return\nfalse;\" return to an asserts-only variable, so in production builds, I\nwould hope that branch gets elided (I haven't checked).\n\nOn further reflection, this is completely false and I'm not sure what I was\nthinking. However, for the update-inner case maybe we can assert that we\nfound a valid slot.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nI wrote:> I see. IIUC from a brief re-reading of the code, saving that chunk would only save us from re-loading \"parent->shift\" from L1 cache and shifting the key. The cycles spent doing that seem small compared to the rest of the work involved in growing a node. Expressions like \"if (idx < 0) return false;\" return to an asserts-only variable, so in production builds, I would hope that branch gets elided (I haven't checked).On further reflection, this is completely false and I'm not sure what I was thinking. 
However, for the update-inner case maybe we can assert that we found a valid slot.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 11 Jan 2023 16:56:25 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 11, 2023 at 12:13 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Tue, Jan 10, 2023 at 7:08 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > It looks no problem in terms of vacuum integration, although I've not\n> > fully tested yet. TID store uses the radix tree as the main storage,\n> > and with the template radix tree, the data types for shared and\n> > non-shared will be different. TID store can have an union for the\n> > radix tree and the structure would be like follows:\n>\n> > /* Storage for Tids */\n> > union tree\n> > {\n> > local_radix_tree *local;\n> > shared_radix_tree *shared;\n> > };\n>\n> We could possibly go back to using a common data type for this, but with unused fields in each setting, as before. We would have to be more careful of things like the 32-bit crash from a few weeks ago.\n\nOne idea to have a common data type without unused fields is to use\nradix_tree a base class. We cast it to radix_tree_shared or\nradix_tree_local depending on the flag is_shared in radix_tree. 
For\ninstance we have like (based on non-template version),\n\nstruct radix_tree\n{\n bool is_shared;\n MemoryContext context;\n};\n\ntypedef struct rt_shared\n{\n rt_handle handle;\n uint32 magic;\n\n /* Root node */\n dsa_pointer root;\n\n uint64 max_val;\n uint64 num_keys;\n\n /* need a lwlock */\n\n /* statistics */\n#ifdef RT_DEBUG\n int32 cnt[RT_SIZE_CLASS_COUNT];\n#endif\n} rt_shared;\n\nstruct radix_tree_shared\n{\n radix_tree rt;\n\n rt_shared *shared;\n dsa_area *area;\n} radix_tree_shared;\n\nstruct radix_tree_local\n{\n radix_tree rt;\n\n uint64 max_val;\n uint64 num_keys;\n\n rt_node *root;\n\n /* used only when the radix tree is private */\n MemoryContextData *inner_slabs[RT_SIZE_CLASS_COUNT];\n MemoryContextData *leaf_slabs[RT_SIZE_CLASS_COUNT];\n\n /* statistics */\n#ifdef RT_DEBUG\n int32 cnt[RT_SIZE_CLASS_COUNT];\n#endif\n} radix_tree_local;\n\n>\n> > In the functions of TID store, we need to call either local or shared\n> > radix tree functions depending on whether TID store is shared or not.\n> > We need if-branch for each key-value pair insertion, but I think it\n> > would not be a big performance problem in TID store use cases, since\n> > vacuum is an I/O intensive operation in many cases.\n>\n> Also, the branch will be easily predicted. That was still true in earlier patches, but with many more branches and fatter code paths.\n>\n> > Overall, I think\n> > there is no problem and I'll investigate it in depth.\n>\n> Okay, great. If the separate-functions approach turns out to be ugly, we can always go back to the branching approach for shared memory. I think we'll want to keep this as a template overall, at least to allow different value types and to ease adding variable-length keys if someone finds a need.\n\nI agree to keep this as a template. From the vacuum integration\nperspective, it would be better if we can use a common data type for\nshared and local. 
It makes sense to have different data types if the\nradix trees have different values types.\n\n>\n> > Apart from that, I've been considering the lock support for shared\n> > radix tree. As we discussed before, the current usage (i.e, only\n> > parallel index vacuum) doesn't require locking support at all, so it\n> > would be enough to have a single lock for simplicity.\n>\n> Right, that should be enough for PG16.\n>\n> > If we want to\n> > use the shared radix tree for other use cases such as the parallel\n> > heap vacuum or the replacement of the hash table for shared buffers,\n> > we would need better lock support.\n>\n> For future parallel pruning, I still think a global lock is \"probably\" fine if the workers buffer in local arrays. Highly concurrent applications will need additional work, of course.\n>\n> > For example, if we want to support\n> > Optimistic Lock Coupling[1],\n>\n> Interesting, from the same authors!\n\n+1\n\n>\n> > we would need to change not only the node\n> > structure but also the logic. Which probably leads to widen the gap\n> > between the code for non-shared and shared radix tree. In this case,\n> > once we have a better radix tree optimized for shared case, perhaps we\n> > can replace the templated shared radix tree with it. I'd like to hear\n> > your opinion on this line.\n>\n> I'm not in a position to speculate on how best to do scalable concurrency, much less how it should coexist with the local implementation. It's interesting that their \"ROWEX\" scheme gives up maintaining order in the linear nodes.\n\n>\n> > > One review point I'll mention: Somehow I didn't notice there is no use for the \"chunk\" field in the rt_node type -- it's only set to zero and copied when growing. What is the purpose? Removing it would allow the smallest node to take up only 32 bytes with a fanout of 3, by eliminating padding.\n> >\n> > Oh, I didn't notice that. 
The chunk field was originally used when\n> > redirecting the child pointer in the parent node from old to new\n> > (grown) node. When redirecting the pointer, since the corresponding\n> > chunk surely exists on the parent we can skip existence checks.\n> > Currently we use RT_NODE_UPDATE_INNER() for that (see\n> > RT_REPLACE_NODE()) but having a dedicated function to update the\n> > existing chunk and child pointer might improve the performance. Or\n> > reducing the node size by getting rid of the chunk field might be\n> > better.\n>\n> I see. IIUC from a brief re-reading of the code, saving that chunk would only save us from re-loading \"parent->shift\" from L1 cache and shifting the key. The cycles spent doing that seem small compared to the rest of the work involved in growing a node. Expressions like \"if (idx < 0) return false;\" return to an asserts-only variable, so in production builds, I would hope that branch gets elided (I haven't checked).\n>\n> I'm quite keen on making the smallest node padding-free, (since we don't yet have path compression or lazy path expansion), and this seems the way to get there.\n\nOkay, let's get rid of that in the v18.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 12 Jan 2023 14:44:14 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 12, 2023 at 12:44 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Wed, Jan 11, 2023 at 12:13 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Tue, Jan 10, 2023 at 7:08 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> I agree to keep this as a template.\n\nOkay, I'll squash the previous patch and work on cleaning up the internals.\nI'll keep the external APIs the same so that your work on vacuum\nintegration can be easily rebased on top of that, and we can 
work\nindependently.\n\n> From the vacuum integration\n> perspective, it would be better if we can use a common data type for\n> shared and local. It makes sense to have different data types if the\n> radix trees have different values types.\n\nI agree it would be better, all else being equal. I have some further\nthoughts below.\n\n> > > It looks no problem in terms of vacuum integration, although I've not\n> > > fully tested yet. TID store uses the radix tree as the main storage,\n> > > and with the template radix tree, the data types for shared and\n> > > non-shared will be different. TID store can have an union for the\n> > > radix tree and the structure would be like follows:\n> >\n> > > /* Storage for Tids */\n> > > union tree\n> > > {\n> > > local_radix_tree *local;\n> > > shared_radix_tree *shared;\n> > > };\n> >\n> > We could possibly go back to using a common data type for this, but\nwith unused fields in each setting, as before. We would have to be more\ncareful of things like the 32-bit crash from a few weeks ago.\n>\n> One idea to have a common data type without unused fields is to use\n> radix_tree a base class. We cast it to radix_tree_shared or\n> radix_tree_local depending on the flag is_shared in radix_tree. For\n> instance we have like (based on non-template version),\n\n> struct radix_tree\n> {\n> bool is_shared;\n> MemoryContext context;\n> };\n\nThat could work in principle. My first impression is, just a memory context\nis not much of a base class. Also, casts can creep into a large number of\nplaces.\n\nAnother thought came to mind: I'm guessing the TID store is unusual --\nmeaning most uses of radix tree will only need one kind of memory\n(local/shared). I could be wrong about that, and it _is_ a guess about the\nfuture. 
If true, then it makes more sense that only code that needs both\nmemory kinds should be responsible for keeping them separate.\n\nThe template might be easier for future use cases if shared memory were\nall-or-nothing, meaning either\n\n- completely different functions and types depending on RT_SHMEM\n- use branches (like v8)\n\nThe union sounds like a good thing to try, but do whatever seems right.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Jan 12, 2023 at 12:44 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Wed, Jan 11, 2023 at 12:13 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> >> > On Tue, Jan 10, 2023 at 7:08 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> I agree to keep this as a template.Okay, I'll squash the previous patch and work on cleaning up the internals. I'll keep the external APIs the same so that your work on vacuum integration can be easily rebased on top of that, and we can work independently.> From the vacuum integration> perspective, it would be better if we can use a common data type for> shared and local. It makes sense to have different data types if the> radix trees have different values types.I agree it would be better, all else being equal. I have some further thoughts below.> > > It looks no problem in terms of vacuum integration, although I've not> > > fully tested yet. TID store uses the radix tree as the main storage,> > > and with the template radix tree, the data types for shared and> > > non-shared will be different. TID store can have an union for the> > > radix tree and the structure would be like follows:> >> > >     /* Storage for Tids */> > >     union tree> > >     {> > >         local_radix_tree    *local;> > >         shared_radix_tree   *shared;> > >     };> >> > We could possibly go back to using a common data type for this, but with unused fields in each setting, as before. 
We would have to be more careful of things like the 32-bit crash from a few weeks ago.>> One idea to have a common data type without unused fields is to use> radix_tree a base class. We cast it to radix_tree_shared or> radix_tree_local depending on the flag is_shared in radix_tree. For> instance we have like (based on non-template version),> struct radix_tree> {>     bool    is_shared;>     MemoryContext context;> };That could work in principle. My first impression is, just a memory context is not much of a base class. Also, casts can creep into a large number of places.Another thought came to mind: I'm guessing the TID store is unusual -- meaning most uses of radix tree will only need one kind of memory (local/shared). I could be wrong about that, and it _is_ a guess about the future. If true, then it makes more sense that only code that needs both memory kinds should be responsible for keeping them separate.The template might be easier for future use cases if shared memory were all-or-nothing, meaning either- completely different functions and types depending on RT_SHMEM- use branches (like v8)The union sounds like a good thing to try, but do whatever seems right.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 12 Jan 2023 15:20:49 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 12, 2023 at 5:21 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Jan 12, 2023 at 12:44 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Jan 11, 2023 at 12:13 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > On Tue, Jan 10, 2023 at 7:08 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I agree to keep this as a template.\n>\n> Okay, I'll squash the previous patch and work on cleaning up the internals. 
I'll keep the external APIs the same so that your work on vacuum integration can be easily rebased on top of that, and we can work independently.\n\nThanks!\n\n>\n> > From the vacuum integration\n> > perspective, it would be better if we can use a common data type for\n> > shared and local. It makes sense to have different data types if the\n> > radix trees have different values types.\n>\n> I agree it would be better, all else being equal. I have some further thoughts below.\n>\n> > > > It looks no problem in terms of vacuum integration, although I've not\n> > > > fully tested yet. TID store uses the radix tree as the main storage,\n> > > > and with the template radix tree, the data types for shared and\n> > > > non-shared will be different. TID store can have an union for the\n> > > > radix tree and the structure would be like follows:\n> > >\n> > > > /* Storage for Tids */\n> > > > union tree\n> > > > {\n> > > > local_radix_tree *local;\n> > > > shared_radix_tree *shared;\n> > > > };\n> > >\n> > > We could possibly go back to using a common data type for this, but with unused fields in each setting, as before. We would have to be more careful of things like the 32-bit crash from a few weeks ago.\n> >\n> > One idea to have a common data type without unused fields is to use\n> > radix_tree a base class. We cast it to radix_tree_shared or\n> > radix_tree_local depending on the flag is_shared in radix_tree. For\n> > instance we have like (based on non-template version),\n>\n> > struct radix_tree\n> > {\n> > bool is_shared;\n> > MemoryContext context;\n> > };\n>\n> That could work in principle. My first impression is, just a memory context is not much of a base class. Also, casts can creep into a large number of places.\n>\n> Another thought came to mind: I'm guessing the TID store is unusual -- meaning most uses of radix tree will only need one kind of memory (local/shared). I could be wrong about that, and it _is_ a guess about the future. 
If true, then it makes more sense that only code that needs both memory kinds should be responsible for keeping them separate.\n\nTrue.\n\n>\n> The template might be easier for future use cases if shared memory were all-or-nothing, meaning either\n>\n> - completely different functions and types depending on RT_SHMEM\n> - use branches (like v8)\n>\n> The union sounds like a good thing to try, but do whatever seems right.\n\nI've implemented the idea of using union. Let me share WIP code for\ndiscussion, I've attached three patches that can be applied on top of\nv17-0009 patch. v17-0010 implements missing shared memory support\nfunctions such as RT_DETACH and RT_GET_HANDLE, and some fixes.\nv17-0011 patch adds TidStore, and v17-0012 patch is the vacuum\nintegration.\n\nOverall, TidStore implementation with the union idea doesn't look so\nugly to me. But I got many compiler warning about unused radix tree\nfunctions like:\n\ntidstore.c:99:19: warning: 'shared_rt_delete' defined but not used\n[-Wunused-function]\n\nI'm not sure there is a convenient way to suppress this warning but\none idea is to have some macros to specify what operations are\nenabled/declared.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 12 Jan 2023 23:50:32 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 23, 2022 at 4:33 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n>\n> On Thu, Dec 22, 2022 at 10:00 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > If the value is a power of 2, it seems to work perfectly fine. 
But for\n> > example if it's 700MB, the total memory exceeds the limit:\n> >\n> > 2*(1+2+4+8+16+32+64+128) = 510MB (72.8% of 700MB) -> keep going\n> > 510 + 256 = 766MB -> stop but it exceeds the limit.\n> >\n> > In a more bigger case, if it's 11000MB,\n> >\n> > 2*(1+2+...+2048) = 8190MB (74.4%)\n> > 8190 + 4096 = 12286MB\n> >\n> > That being said, I don't think they are not common cases. So the 75%\n> > threshold seems to work fine in most cases.\n>\n> Thinking some more, I agree this doesn't have large practical risk, but thinking from the point of view of the community, being loose with memory limits by up to 10% is not a good precedent.\n\nAgreed.\n\n> Perhaps we can be clever and use 75% when the limit is a power of two and 50% otherwise. I'm skeptical of trying to be clever, and I just thought of an additional concern: We're assuming behavior of the growth in size of new DSA segments, which could possibly change. Given how allocators are typically coded, though, it seems safe to assume that they'll at most double in size.\n\nSounds good to me.\n\nI've written a simple script to simulate the DSA memory usage and the\nlimit. The 75% limit works fine for a power of two cases, and we can\nuse the 60% limit for other cases (it seems we can use up to about 66%\nbut used 60% for safety). It would be best if we can mathematically\nprove it but I could prove only the power of two cases. 
But the script\npractically shows the 60% threshold would work for these cases.\n\nRegards\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 16 Jan 2023 11:52:30 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 12, 2023 at 9:51 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Thu, Jan 12, 2023 at 5:21 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > Okay, I'll squash the previous patch and work on cleaning up the\ninternals. I'll keep the external APIs the same so that your work on vacuum\nintegration can be easily rebased on top of that, and we can work\nindependently.\n\nThere were some conflicts with HEAD, so to keep the CF bot busy, I've\nquickly put together v18. I still have a lot of cleanup work to do, but\nthis is enough for now.\n\n0003 contains all v17 local-memory coding squashed together.\n\n0004 perf test not updated but it doesn't build by default so it's fine for\nnow\n\n0005 removes node.chunk as discussed, but does not change node4 fanout yet.\n\n0006 is a small cleanup regarding setting node fanout.\n\n0007 squashes my shared memory work with Masahiko's fixes from the addendum\nv17-0010.\n\n0008 turns the existence checks in RT_NODE_UPDATE_INNER into Asserts, as\ndiscussed.\n\n0009/0010 are just copies of Masauiko's v17 addendum v17-0011/12, but the\nlatter rebased over recent variable renaming (it's possible I missed\nsomething, so worth checking).\n\n> I've implemented the idea of using union. Let me share WIP code for\n> discussion, I've attached three patches that can be applied on top of\n\nSeems fine as far as the union goes. Let's go ahead with this, and make\nprogress on locking etc.\n\n> Overall, TidStore implementation with the union idea doesn't look so\n> ugly to me. 
But I got many compiler warning about unused radix tree\n> functions like:\n>\n> tidstore.c:99:19: warning: 'shared_rt_delete' defined but not used\n> [-Wunused-function]\n>\n> I'm not sure there is a convenient way to suppress this warning but\n> one idea is to have some macros to specify what operations are\n> enabled/declared.\n\nThat sounds like a good idea. It's also worth wondering if we even need\nRT_NUM_ENTRIES at all, since the caller is capable of keeping track of that\nif necessary. It's also misnamed, since it's concerned with the number of\nkeys. The vacuum case cares about the number of TIDs, and not number of\n(encoded) keys. Even if we ever (say) changed the key to blocknumber and\nvalue to Bitmapset, the number of keys might not be interesting. It sounds\nlike we should at least make the delete functionality optional. (Side note\non optional functions: if an implementation didn't care about iteration or\nits order, we could optimize insertion into linear nodes)\n\nSince this is WIP, you may already have some polish in mind, so I won't go\nover the patches in detail, but I wanted to ask about a few things (numbers\nreferring to v17 addendum, not v18):\n\n0011\n\n+ * 'num_tids' is the number of Tids stored so far. 'max_byte' is the\nmaximum\n+ * bytes a TidStore can use. These two fields are commonly used in both\n+ * non-shared case and shared case.\n+ */\n+ uint32 num_tids;\n\nuint32 is how we store the block number, so this too small and will wrap\naround on overflow. int64 seems better.\n\n+ * We calculate the maximum bytes for the TidStore in different ways\n+ * for non-shared case and shared case. Please refer to the comment\n+ * TIDSTORE_MEMORY_DEDUCT for details.\n+ */\n\nMaybe the #define and comment should be close to here.\n\n+ * Destroy a TidStore, returning all memory. The caller must be certain\nthat\n+ * no other backend will attempt to access the TidStore before calling this\n+ * function. 
Other backend must explicitly call tidstore_detach to free up\n+ * backend-local memory associated with the TidStore. The backend that\ncalls\n+ * tidstore_destroy must not call tidstore_detach.\n+ */\n+void\n+tidstore_destroy(TidStore *ts)\n\nIf not addressed by next patch, need to phrase comment with FIXME or TODO\nabout making certain.\n\n+ * Add Tids on a block to TidStore. The caller must ensure the offset\nnumbers\n+ * in 'offsets' are ordered in ascending order.\n\nMust? What happens otherwise?\n\n+ uint64 last_key = PG_UINT64_MAX;\n\nI'm having some difficulty understanding this sentinel and how it's used.\n\n@@ -1039,11 +1040,18 @@ lazy_scan_heap(LVRelState *vacrel)\n if (prunestate.has_lpdead_items)\n {\n Size freespace;\n+ TidStoreIter *iter;\n+ TidStoreIterResult *result;\n\n- lazy_vacuum_heap_page(vacrel, blkno, buf, 0, &vmbuffer);\n+ iter = tidstore_begin_iterate(vacrel->dead_items);\n+ result = tidstore_iterate_next(iter);\n+ lazy_vacuum_heap_page(vacrel, blkno, result->offsets, result->num_offsets,\n+ buf, &vmbuffer);\n+ Assert(!tidstore_iterate_next(iter));\n+ tidstore_end_iterate(iter);\n\n /* Forget the LP_DEAD items that we just vacuumed */\n- dead_items->num_items = 0;\n+ tidstore_reset(dead_items);\n\nThis part only runs \"if (vacrel->nindexes == 0)\", so seems like unneeded\ncomplexity. It arises because lazy_scan_prune() populates the tid store\neven if no index vacuuming happens. Perhaps the caller of lazy_scan_prune()\ncould pass the deadoffsets array, and upon returning, either populate the\nstore or call lazy_vacuum_heap_page(), as needed. It's quite possible I'm\nmissing some detail, so some description of the design choices made would\nbe helpful.\n\nOn Mon, Jan 16, 2023 at 9:53 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> I've written a simple script to simulate the DSA memory usage and the\n> limit. 
The 75% limit works fine for a power of two cases, and we can\n> use the 60% limit for other cases (it seems we can use up to about 66%\n> but used 60% for safety). It would be best if we can mathematically\n> prove it but I could prove only the power of two cases. But the script\n> practically shows the 60% threshold would work for these cases.\n\nOkay. It's worth highlighting this in the comments, and also the fact that\nit depends on internal details of how DSA increases segment size.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 16 Jan 2023 12:01:57 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Jan 12, 2023 at 9:51 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Jan 12, 2023 at 5:21 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > Okay, I'll squash the previous patch and work on cleaning up the internals. I'll keep the external APIs the same so that your work on vacuum integration can be easily rebased on top of that, and we can work independently.\n>\n> There were some conflicts with HEAD, so to keep the CF bot busy, I've quickly put together v18. I still have a lot of cleanup work to do, but this is enough for now.\n\nThanks! cfbot complaints about some warnings but these are expected\n(due to unused delete routines etc). 
But one reported error[1] might\nbe relevant with 0002 patch?\n\n[05:44:11.759] \"link\" /MACHINE:x64\n/OUT:src/test/modules/test_radixtree/test_radixtree.dll\nsrc/test/modules/test_radixtree/test_radixtree.dll.p/win32ver.res\nsrc/test/modules/test_radixtree/test_radixtree.dll.p/test_radixtree.c.obj\n\"/nologo\" \"/release\" \"/nologo\" \"/DEBUG\"\n\"/PDB:src/test\\modules\\test_radixtree\\test_radixtree.pdb\" \"/DLL\"\n\"/IMPLIB:src/test\\modules\\test_radixtree\\test_radixtree.lib\"\n\"/INCREMENTAL:NO\" \"/STACK:4194304\" \"/NOEXP\" \"/DEBUG:FASTLINK\"\n\"/NOIMPLIB\" \"C:/cirrus/build/src/backend/postgres.exe.lib\"\n\"wldap32.lib\" \"c:/openssl/1.1/lib/libssl.lib\"\n\"c:/openssl/1.1/lib/libcrypto.lib\" \"ws2_32.lib\" \"kernel32.lib\"\n\"user32.lib\" \"gdi32.lib\" \"winspool.lib\" \"shell32.lib\" \"ole32.lib\"\n\"oleaut32.lib\" \"uuid.lib\" \"comdlg32.lib\" \"advapi32.lib\"\n[05:44:11.819] test_radixtree.c.obj : error LNK2001: unresolved\nexternal symbol pg_popcount64\n[05:44:11.819] src\\test\\modules\\test_radixtree\\test_radixtree.dll :\nfatal error LNK1120: 1 unresolved externals\n\n> 0003 contains all v17 local-memory coding squashed together.\n\n+ * XXX: Most functions in this file have two variants for inner nodes and leaf\n+ * nodes, therefore there are duplication codes. 
While this sometimes makes the\n+ * code maintenance tricky, this reduces branch prediction misses when judging\n+ * whether the node is a inner node of a leaf node.\n\nThis comment seems to be out-of-date since we made it a template.\n\n---\n+#ifndef RT_COMMON\n+#define RT_COMMON\n\nWhat are we using this macro RT_COMMON for?\n\n---\nThe following macros are defined but not undefined in radixtree.h:\n\nRT_MAKE_PREFIX\nRT_MAKE_NAME\nRT_MAKE_NAME_\nRT_SEARCH\nUINT64_FORMAT_HEX\nRT_NODE_SPAN\nRT_NODE_MAX_SLOTS\nRT_CHUNK_MASK\nRT_MAX_SHIFT\nRT_MAX_LEVEL\nRT_NODE_125_INVALID_IDX\nRT_GET_KEY_CHUNK\nBM_IDX\nBM_BIT\nRT_NODE_KIND_4\nRT_NODE_KIND_32\nRT_NODE_KIND_125\nRT_NODE_KIND_256\nRT_NODE_KIND_COUNT\nRT_PTR_LOCAL\nRT_PTR_ALLOC\nRT_INVALID_PTR_ALLOC\nNODE_SLAB_BLOCK_SIZE\n\n> 0004 perf test not updated but it doesn't build by default so it's fine for now\n\nOkay.\n\n> 0005 removes node.chunk as discussed, but does not change node4 fanout yet.\n\nLGTM.\n\n> 0006 is a small cleanup regarding setting node fanout.\n\nLGTM.\n\n> 0007 squashes my shared memory work with Masahiko's fixes from the addendum v17-0010.\n\n+ /* XXX: do we need to set a callback on exit to detach dsa? */\n\nIn the current shared radix tree design, it's a caller responsible\nthat they create (or attach to) a DSA area and pass it to RT_CREATE()\nor RT_ATTACH(). It enables us to use one DSA not only for the radix\ntree but also other data. Which is more flexible. So the caller needs\nto detach from the DSA somehow, so I think we don't need to set a\ncallback here for that.\n\n---\n+ dsa_free(tree->dsa, tree->ctl->handle); // XXX\n+ //dsa_detach(tree->dsa);\n\nSimilar to above, I think we should not detach from the DSA area here.\n\nGiven that the DSA area used by the radix tree could be used also by\nother data, I think that in RT_FREE() we need to free each radix tree\nnode allocated in DSA. 
In lazy vacuum, we check the memory usage\ninstead of the number of TIDs and need to reset the TidScan after an\nindex scan. So it does RT_FREE() and dsa_trim() to return DSM segments\nto the OS. I've implemented rt_free_recurse() for this purpose in the\nv15 version patch.\n\n--\n- Assert(tree->root);\n+ //Assert(tree->ctl->root);\n\nI think we don't need this assertion in the first place. We check it\nat the beginning of the function.\n\n---\n\n+#ifdef RT_NODE_LEVEL_LEAF\n+ Assert(NODE_IS_LEAF(node));\n+#else\n+ Assert(!NODE_IS_LEAF(node));\n+#endif\n+\n\nI think we can move this change to 0003 patch.\n\n> 0008 turns the existence checks in RT_NODE_UPDATE_INNER into Asserts, as discussed.\n\nLGTM.\n\n>\n> 0009/0010 are just copies of Masauiko's v17 addendum v17-0011/12, but the latter rebased over recent variable renaming (it's possible I missed something, so worth checking).\n>\n> > I've implemented the idea of using union. Let me share WIP code for\n> > discussion, I've attached three patches that can be applied on top of\n>\n> Seems fine as far as the union goes. Let's go ahead with this, and make progress on locking etc.\n\n+1\n\n>\n> > Overall, TidStore implementation with the union idea doesn't look so\n> > ugly to me. But I got many compiler warning about unused radix tree\n> > functions like:\n> >\n> > tidstore.c:99:19: warning: 'shared_rt_delete' defined but not used\n> > [-Wunused-function]\n> >\n> > I'm not sure there is a convenient way to suppress this warning but\n> > one idea is to have some macros to specify what operations are\n> > enabled/declared.\n>\n> That sounds like a good idea. It's also worth wondering if we even need RT_NUM_ENTRIES at all, since the caller is capable of keeping track of that if necessary. It's also misnamed, since it's concerned with the number of keys. The vacuum case cares about the number of TIDs, and not number of (encoded) keys. 
Even if we ever (say) changed the key to blocknumber and value to Bitmapset, the number of keys might not be interesting.\n\nRight. In fact, TIdStore doesn't use RT_NUM_ENTRIES.\n\n> It sounds like we should at least make the delete functionality optional. (Side note on optional functions: if an implementation didn't care about iteration or its order, we could optimize insertion into linear nodes)\n\nAgreed.\n\n>\n> Since this is WIP, you may already have some polish in mind, so I won't go over the patches in detail, but I wanted to ask about a few things (numbers referring to v17 addendum, not v18):\n>\n> 0011\n>\n> + * 'num_tids' is the number of Tids stored so far. 'max_byte' is the maximum\n> + * bytes a TidStore can use. These two fields are commonly used in both\n> + * non-shared case and shared case.\n> + */\n> + uint32 num_tids;\n>\n> uint32 is how we store the block number, so this too small and will wrap around on overflow. int64 seems better.\n\nAgreed, will fix.\n\n>\n> + * We calculate the maximum bytes for the TidStore in different ways\n> + * for non-shared case and shared case. Please refer to the comment\n> + * TIDSTORE_MEMORY_DEDUCT for details.\n> + */\n>\n> Maybe the #define and comment should be close to here.\n\nWill fix.\n\n>\n> + * Destroy a TidStore, returning all memory. The caller must be certain that\n> + * no other backend will attempt to access the TidStore before calling this\n> + * function. Other backend must explicitly call tidstore_detach to free up\n> + * backend-local memory associated with the TidStore. The backend that calls\n> + * tidstore_destroy must not call tidstore_detach.\n> + */\n> +void\n> +tidstore_destroy(TidStore *ts)\n>\n> If not addressed by next patch, need to phrase comment with FIXME or TODO about making certain.\n\nWill fix.\n\n>\n> + * Add Tids on a block to TidStore. The caller must ensure the offset numbers\n> + * in 'offsets' are ordered in ascending order.\n>\n> Must? 
What happens otherwise?\n\nIt ends up missing TIDs by overwriting the same key with different\nvalues. Is it better to have a bool argument, say need_sort, to sort\nthe given array if the caller wants?\n\n>\n> + uint64 last_key = PG_UINT64_MAX;\n>\n> I'm having some difficulty understanding this sentinel and how it's used.\n\nWill improve the logic.\n\n>\n> @@ -1039,11 +1040,18 @@ lazy_scan_heap(LVRelState *vacrel)\n> if (prunestate.has_lpdead_items)\n> {\n> Size freespace;\n> + TidStoreIter *iter;\n> + TidStoreIterResult *result;\n>\n> - lazy_vacuum_heap_page(vacrel, blkno, buf, 0, &vmbuffer);\n> + iter = tidstore_begin_iterate(vacrel->dead_items);\n> + result = tidstore_iterate_next(iter);\n> + lazy_vacuum_heap_page(vacrel, blkno, result->offsets, result->num_offsets,\n> + buf, &vmbuffer);\n> + Assert(!tidstore_iterate_next(iter));\n> + tidstore_end_iterate(iter);\n>\n> /* Forget the LP_DEAD items that we just vacuumed */\n> - dead_items->num_items = 0;\n> + tidstore_reset(dead_items);\n>\n> This part only runs \"if (vacrel->nindexes == 0)\", so seems like unneeded complexity. It arises because lazy_scan_prune() populates the tid store even if no index vacuuming happens. Perhaps the caller of lazy_scan_prune() could pass the deadoffsets array, and upon returning, either populate the store or call lazy_vacuum_heap_page(), as needed. It's quite possible I'm missing some detail, so some description of the design choices made would be helpful.\n\nI agree that we don't need complexity here. I'll try this idea.\n\n>\n> On Mon, Jan 16, 2023 at 9:53 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I've written a simple script to simulate the DSA memory usage and the\n> > limit. The 75% limit works fine for a power of two cases, and we can\n> > use the 60% limit for other cases (it seems we can use up to about 66%\n> > but used 60% for safety). It would be best if we can mathematically\n> > prove it but I could prove only the power of two cases. 
But the script\n> > practically shows the 60% threshold would work for these cases.\n>\n> Okay. It's worth highlighting this in the comments, and also the fact that it depends on internal details of how DSA increases segment size.\n\nAgreed.\n\nSince it seems you're working on another cleanup, I can address the\nabove comments after your work is completed. But I'm also fine with\nincluding them into your cleanup work.\n\nRegards,\n\n[1] https://cirrus-ci.com/task/5078505327689728\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 16 Jan 2023 17:18:02 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n\n> Thanks! cfbot complaints about some warnings but these are expected\n> (due to unused delete routines etc). But one reported error[1] might\n> be relevant with 0002 patch?\n\n> [05:44:11.819] test_radixtree.c.obj : error LNK2001: unresolved\n> external symbol pg_popcount64\n> [05:44:11.819] src\\test\\modules\\test_radixtree\\test_radixtree.dll :\n> fatal error LNK1120: 1 unresolved externals\n\nYeah, I'm not sure what's causing that. Since that comes from a debugging\nfunction, we could work around it, but it would be nice to understand why,\nso I'll probably have to experiment on my CI repo.\n\n> ---\n> +#ifndef RT_COMMON\n> +#define RT_COMMON\n>\n> What are we using this macro RT_COMMON for?\n\nIt was a quick way to define some things only once, so they probably all\nshowed up in the list of things you found not undefined. It's different\nfrom the style of simplehash.h, which is to have a local name and #undef\nfor every single thing. simplehash.h is a precedent, so I'll change it to\nmatch. 
I'll take a look at your list, too.\n\n> > + * Add Tids on a block to TidStore. The caller must ensure the offset\nnumbers\n> > + * in 'offsets' are ordered in ascending order.\n> >\n> > Must? What happens otherwise?\n>\n> It ends up missing TIDs by overwriting the same key with different\n> values. Is it better to have a bool argument, say need_sort, to sort\n> the given array if the caller wants?\n\n> Since it seems you're working on another cleanup, I can address the\n> above comments after your work is completed. But I'm also fine with\n> including them into your cleanup work.\n\nI think we can work mostly simultaneously, if you work on tid store and\nvacuum, and I work on the template. We can always submit a full patchset\nincluding each other's latest work. That will catch rebase issues sooner.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Mon, Jan 16, 2023 at 2:02 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> >> Thanks! cfbot complaints about some warnings but these are expected> (due to unused delete routines etc). But one reported error[1] might> be relevant with 0002 patch?> [05:44:11.819] test_radixtree.c.obj : error LNK2001: unresolved> external symbol pg_popcount64> [05:44:11.819] src\\test\\modules\\test_radixtree\\test_radixtree.dll :> fatal error LNK1120: 1 unresolved externalsYeah, I'm not sure what's causing that. Since that comes from a debugging function, we could work around it, but it would be nice to understand why, so I'll probably have to experiment on my CI repo.> ---> +#ifndef RT_COMMON> +#define RT_COMMON>> What are we using this macro RT_COMMON for?It was a quick way to define some things only once, so they probably all showed up in the list of things you found not undefined. It's different from the style of simplehash.h, which is to have a local name and #undef for every single thing. 
simplehash.h is a precedent, so I'll change it to match. I'll take a look at your list, too.> > + * Add Tids on a block to TidStore. The caller must ensure the offset numbers> > + * in 'offsets' are ordered in ascending order.> >> > Must? What happens otherwise?>> It ends up missing TIDs by overwriting the same key with different> values. Is it better to have a bool argument, say need_sort, to sort> the given array if the caller wants?> Since it seems you're working on another cleanup, I can address the> above comments after your work is completed. But I'm also fine with> including them into your cleanup work.I think we can work mostly simultaneously, if you work on tid store and vacuum, and I work on the template. We can always submit a full patchset including each other's latest work. That will catch rebase issues sooner.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 16 Jan 2023 17:11:32 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\nAttached is an update that mostly has the modest goal of getting CI green\nagain. v19-0003 has squashed the entire radix tree template from\npreviously. I've kept out the perf test module for now -- still needs\nupdating.\n\n> > [05:44:11.819] test_radixtree.c.obj : error LNK2001: unresolved\n> > external symbol pg_popcount64\n> > [05:44:11.819] src\\test\\modules\\test_radixtree\\test_radixtree.dll :\n> > fatal error LNK1120: 1 unresolved externals\n>\n> Yeah, I'm not sure what's causing that. 
Since that comes from a debugging\nfunction, we could work around it, but it would be nice to understand why,\nso I'll probably have to experiment on my CI repo.\n\nI'm still confused by this error, because it only occurs in the test\nmodule. I successfully built with just 0002 in CI so elsewhere where bmw_*\nsymbols resolve just fine on all platforms. I've worked around the error in\nv19-0004 by using the general-purpose pg_popcount() function. We only need\nto count bits in assert builds, so it doesn't matter a whole lot.\n\n> + /* XXX: do we need to set a callback on exit to detach dsa? */\n>\n> In the current shared radix tree design, it's a caller responsible\n> that they create (or attach to) a DSA area and pass it to RT_CREATE()\n> or RT_ATTACH(). It enables us to use one DSA not only for the radix\n> tree but also other data. Which is more flexible. So the caller needs\n> to detach from the DSA somehow, so I think we don't need to set a\n> callback here for that.\n>\n> ---\n> + dsa_free(tree->dsa, tree->ctl->handle); // XXX\n> + //dsa_detach(tree->dsa);\n>\n> Similar to above, I think we should not detach from the DSA area here.\n>\n> Given that the DSA area used by the radix tree could be used also by\n> other data, I think that in RT_FREE() we need to free each radix tree\n> node allocated in DSA. In lazy vacuum, we check the memory usage\n> instead of the number of TIDs and need to reset the TidScan after an\n> index scan. So it does RT_FREE() and dsa_trim() to return DSM segments\n> to the OS. I've implemented rt_free_recurse() for this purpose in the\n> v15 version patch.\n>\n> --\n> - Assert(tree->root);\n> + //Assert(tree->ctl->root);\n>\n> I think we don't need this assertion in the first place. We check it\n> at the beginning of the function.\n\nI've removed these in v19-0006.\n\n> > That sounds like a good idea. It's also worth wondering if we even need\nRT_NUM_ENTRIES at all, since the caller is capable of keeping track of that\nif necessary. 
It's also misnamed, since it's concerned with the number of\nkeys. The vacuum case cares about the number of TIDs, and not number of\n(encoded) keys. Even if we ever (say) changed the key to blocknumber and\nvalue to Bitmapset, the number of keys might not be interesting.\n>\n> Right. In fact, TIdStore doesn't use RT_NUM_ENTRIES.\n\nI've moved it to the test module, which uses it extensively. There, the\nname is more clear what it's for, so I didn't change the name.\n\n> > It sounds like we should at least make the delete functionality\noptional. (Side note on optional functions: if an implementation didn't\ncare about iteration or its order, we could optimize insertion into linear\nnodes)\n>\n> Agreed.\n\nDone in v19-0007.\n\nv19-0009 is just a rebase over some more vacuum cleanups.\n\nI'll continue working on internals cleanup.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Tue, 17 Jan 2023 18:05:59 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> > + * Add Tids on a block to TidStore. The caller must ensure the offset\nnumbers\n> > + * in 'offsets' are ordered in ascending order.\n> >\n> > Must? What happens otherwise?\n>\n> It ends up missing TIDs by overwriting the same key with different\n> values. Is it better to have a bool argument, say need_sort, to sort\n> the given array if the caller wants?\n\nNow that I've studied it some more, I see what's happening: We need all\nbits set in the \"value\" before we insert it, since it would be too\nexpensive to retrieve the current value, add one bit, and put it back.\nAlso, as a consequence of the encoding, part of the tid is in the key, and\npart in the value. 
It makes more sense now, but it needs more than zero\ncomments.\n\nAs for the order, I don't think it's the responsibility of the caller to\nguess if it needs sorting -- if unordered offsets lead to data loss, this\nfunction needs to take care of it.\n\n> > + uint64 last_key = PG_UINT64_MAX;\n> >\n> > I'm having some difficulty understanding this sentinel and how it's\nused.\n>\n> Will improve the logic.\n\nPart of the problem is the English language: \"last\" can mean \"previous\" or\n\"at the end\", so maybe some name changes would help.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Mon, Jan 16, 2023 at 2:02 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> > + * Add Tids on a block to TidStore. The caller must ensure the offset numbers> > + * in 'offsets' are ordered in ascending order.> >> > Must? What happens otherwise?>> It ends up missing TIDs by overwriting the same key with different> values. Is it better to have a bool argument, say need_sort, to sort> the given array if the caller wants?Now that I've studied it some more, I see what's happening: We need all bits set in the \"value\" before we insert it, since it would be too expensive to retrieve the current value, add one bit, and put it back. Also, as a consequence of the encoding, part of the tid is in the key, and part in the value. 
It makes more sense now, but it needs more than zero comments.As for the order, I don't think it's the responsibility of the caller to guess if it needs sorting -- if unordered offsets lead to data loss, this function needs to take care of it.> > + uint64 last_key = PG_UINT64_MAX;> >> > I'm having some difficulty understanding this sentinel and how it's used.>> Will improve the logic.Part of the problem is the English language: \"last\" can mean \"previous\" or \"at the end\", so maybe some name changes would help.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 18 Jan 2023 11:44:14 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 17, 2023 at 8:06 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> Attached is an update that mostly has the modest goal of getting CI green again. v19-0003 has squashed the entire radix tree template from previously. I've kept out the perf test module for now -- still needs updating.\n>\n> > > [05:44:11.819] test_radixtree.c.obj : error LNK2001: unresolved\n> > > external symbol pg_popcount64\n> > > [05:44:11.819] src\\test\\modules\\test_radixtree\\test_radixtree.dll :\n> > > fatal error LNK1120: 1 unresolved externals\n> >\n> > Yeah, I'm not sure what's causing that. Since that comes from a debugging function, we could work around it, but it would be nice to understand why, so I'll probably have to experiment on my CI repo.\n>\n> I'm still confused by this error, because it only occurs in the test module. I successfully built with just 0002 in CI so elsewhere where bmw_* symbols resolve just fine on all platforms. 
I've worked around the error in v19-0004 by using the general-purpose pg_popcount() function. We only need to count bits in assert builds, so it doesn't matter a whole lot.\n\nI spent today investigating this issue, I found out that on Windows,\nlibpgport_src.a is not linked when building codes outside of\nsrc/backend unless explicitly linking it. It's not a problem on Linux\netc. but the linker raises a fatal error on Windows. I'm not sure the\nright way to fix it but the attached patch resolved the issue on\ncfbot. Since it seems not to be related to 0002 patch but maybe the\ndesigned behavior or a problem in meson. We can discuss it on a\nseparate thread.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 19 Jan 2023 00:49:04 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 17, 2023 at 8:06 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> Attached is an update that mostly has the modest goal of getting CI green again. v19-0003 has squashed the entire radix tree template from previously. I've kept out the perf test module for now -- still needs updating.\n>\n> > > [05:44:11.819] test_radixtree.c.obj : error LNK2001: unresolved\n> > > external symbol pg_popcount64\n> > > [05:44:11.819] src\\test\\modules\\test_radixtree\\test_radixtree.dll :\n> > > fatal error LNK1120: 1 unresolved externals\n> >\n> > Yeah, I'm not sure what's causing that. Since that comes from a debugging function, we could work around it, but it would be nice to understand why, so I'll probably have to experiment on my CI repo.\n>\n> I'm still confused by this error, because it only occurs in the test module. 
I successfully built with just 0002 in CI so elsewhere where bmw_* symbols resolve just fine on all platforms. I've worked around the error in v19-0004 by using the general-purpose pg_popcount() function. We only need to count bits in assert builds, so it doesn't matter a whole lot.\n>\n> > + /* XXX: do we need to set a callback on exit to detach dsa? */\n> >\n> > In the current shared radix tree design, it's a caller responsible\n> > that they create (or attach to) a DSA area and pass it to RT_CREATE()\n> > or RT_ATTACH(). It enables us to use one DSA not only for the radix\n> > tree but also other data. Which is more flexible. So the caller needs\n> > to detach from the DSA somehow, so I think we don't need to set a\n> > callback here for that.\n> >\n> > ---\n> > + dsa_free(tree->dsa, tree->ctl->handle); // XXX\n> > + //dsa_detach(tree->dsa);\n> >\n> > Similar to above, I think we should not detach from the DSA area here.\n> >\n> > Given that the DSA area used by the radix tree could be used also by\n> > other data, I think that in RT_FREE() we need to free each radix tree\n> > node allocated in DSA. In lazy vacuum, we check the memory usage\n> > instead of the number of TIDs and need to reset the TidScan after an\n> > index scan. So it does RT_FREE() and dsa_trim() to return DSM segments\n> > to the OS. I've implemented rt_free_recurse() for this purpose in the\n> > v15 version patch.\n> >\n> > --\n> > - Assert(tree->root);\n> > + //Assert(tree->ctl->root);\n> >\n> > I think we don't need this assertion in the first place. We check it\n> > at the beginning of the function.\n>\n> I've removed these in v19-0006.\n>\n> > > That sounds like a good idea. It's also worth wondering if we even need RT_NUM_ENTRIES at all, since the caller is capable of keeping track of that if necessary. It's also misnamed, since it's concerned with the number of keys. The vacuum case cares about the number of TIDs, and not number of (encoded) keys. 
Even if we ever (say) changed the key to blocknumber and value to Bitmapset, the number of keys might not be interesting.\n> >\n> > Right. In fact, TIdStore doesn't use RT_NUM_ENTRIES.\n>\n> I've moved it to the test module, which uses it extensively. There, the name is more clear what it's for, so I didn't change the name.\n>\n> > > It sounds like we should at least make the delete functionality optional. (Side note on optional functions: if an implementation didn't care about iteration or its order, we could optimize insertion into linear nodes)\n> >\n> > Agreed.\n>\n> Done in v19-0007.\n>\n> v19-0009 is just a rebase over some more vacuum cleanups.\n\nThank you for updating the patches!\n\nI've attached new version patches. There is no change from v19 patch\nfor 0001 through 0006. And 0004, 0005 and 0006 patches look good to\nme. We can merge them into 0003 patch.\n\n0007 patch fixes functions that are defined when RT_DEBUG. These\nfunctions might be removed before the commit but this is useful at\nleast under development. 0008 patch fixes a bug in\nRT_CHUNK_VALUES_ARRAY_SHIFT() and adds tests for that. 0009 patch\nfixes the cfbot issue by linking pgport_srv. 0010 patch adds\nRT_FREE_RECURSE() to free all radix tree nodes allocated in DSA. 0011\npatch updates copyright etc. 
0012 and 0013 patches are updated patches\nthat incorporate all comments I got so far.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Fri, 20 Jan 2023 00:18:02 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\nIn v21, all of your v20 improvements to the radix tree template and test\nhave been squashed into 0003, with one exception: v20-0010 (recursive\nfreeing of shared mem), which I've attached separately (for flexibility) as\nv21-0006. I believe one of your earlier patches had a new DSA function for\nfreeing memory more quickly -- was there a problem with that approach? I\ndon't recall where that discussion went.\n\n> + * XXX: Most functions in this file have two variants for inner nodes\nand leaf\n> + * nodes, therefore there are duplication codes. While this sometimes\nmakes the\n> + * code maintenance tricky, this reduces branch prediction misses when\njudging\n> + * whether the node is a inner node of a leaf node.\n>\n> This comment seems to be out-of-date since we made it a template.\n\nDone in 0020, along with a bunch of other comment editing.\n\n> The following macros are defined but not undefined in radixtree.h:\n\nFixed in v21-0018.\n\nAlso:\n\n0007 makes the value type configurable. Some debug functionality still\nassumes integer type, but I think the rest is agnostic.\n0010 turns node4 into node3, as discussed, going from 48 bytes to 32.\n0012 adopts the benchmark module to the template, and adds meson support\n(builds with warnings, but okay because not meant for commit).\n\nThe rest are cleanups, small refactorings, and more comment rewrites. I've\nkept them separate for visibility. 
Next patch can squash them unless there\nis any discussion.\n\n> > uint32 is how we store the block number, so this too small and will\nwrap around on overflow. int64 seems better.\n>\n> Agreed, will fix.\n\nGreat, but it's now uint64, not int64. All the large counters in struct\nLVRelState, for example, are signed integers, as the usual practice.\nUnsigned ints are \"usually\" for things like bit patterns and where explicit\nwraparound is desired. There's probably more that can be done here to\nchange to signed types, but I think it's still a bit early to get to that\nlevel of nitpicking. (Soon, I hope :-) )\n\n> > + * We calculate the maximum bytes for the TidStore in different ways\n> > + * for non-shared case and shared case. Please refer to the comment\n> > + * TIDSTORE_MEMORY_DEDUCT for details.\n> > + */\n> >\n> > Maybe the #define and comment should be close to here.\n>\n> Will fix.\n\nFor this, I intended that \"here\" meant \"in or just above the function\".\n\n+#define TIDSTORE_LOCAL_MAX_MEMORY_DEDUCT (1024L * 70) /* 70kB */\n+#define TIDSTORE_SHARED_MAX_MEMORY_RATIO_PO2 (float) 0.75\n+#define TIDSTORE_SHARED_MAX_MEMORY_RATIO (float) 0.6\n\nThese symbols are used only once, in tidstore_create(), and are difficult\nto read. That function has few comments. The symbols have several\nparagraphs, but they are far away. It might be better for readability to\njust hard-code numbers in the function, with the explanation about the\nnumbers near where they are used.\n\n> > + * Destroy a TidStore, returning all memory. The caller must be\ncertain that\n> > + * no other backend will attempt to access the TidStore before calling\nthis\n> > + * function. Other backend must explicitly call tidstore_detach to\nfree up\n> > + * backend-local memory associated with the TidStore. 
The backend that\ncalls\n> > + * tidstore_destroy must not call tidstore_detach.\n> > + */\n> > +void\n> > +tidstore_destroy(TidStore *ts)\n> >\n> > If not addressed by next patch, need to phrase comment with FIXME or\nTODO about making certain.\n>\n> Will fix.\n\nDid anything change here? There is also this, in the template, which I'm\nnot sure has been addressed:\n\n * XXX: Currently we allow only one process to do iteration. Therefore,\nrt_node_iter\n * has the local pointers to nodes, rather than RT_PTR_ALLOC.\n * We need either a safeguard to disallow other processes to begin the\niteration\n * while one process is doing or to allow multiple processes to do the\niteration.\n\n> > This part only runs \"if (vacrel->nindexes == 0)\", so seems like\nunneeded complexity. It arises because lazy_scan_prune() populates the tid\nstore even if no index vacuuming happens. Perhaps the caller of\nlazy_scan_prune() could pass the deadoffsets array, and upon returning,\neither populate the store or call lazy_vacuum_heap_page(), as needed. It's\nquite possible I'm missing some detail, so some description of the design\nchoices made would be helpful.\n>\n> I agree that we don't need complexity here. I'll try this idea.\n\nKeeping the offsets array in the prunestate seems to work out well.\n\nSome other quick comments on tid store and vacuum, not comprehensive. Let\nme know if I've misunderstood something:\n\nTID store:\n\n+ * XXXXXXXX XXXYYYYY YYYYYYYY YYYYYYYY YYYYYYYY YYYuuuu\n+ *\n+ * X = bits used for offset number\n+ * Y = bits used for block number\n+ * u = unused bit\n\nI was confused for a while, and I realized the bits are in reverse order\nfrom how they are usually pictured (high on left, low on the right).\n\n+ * 11 bits enough for the offset number, because MaxHeapTuplesPerPage <\n2^11\n+ * on all supported block sizes (TIDSTORE_OFFSET_NBITS). 
We are frugal with\n\n+ * XXX: if we want to support non-heap table AM that want to use the full\n+ * range of possible offset numbers, we'll need to reconsider\n+ * TIDSTORE_OFFSET_NBITS value.\n\nWould it be worth it (or possible) to calculate constants based on\ncompile-time block size? And/or have a fallback for other table AMs? Since\nthis file is in access/common, the intention is to allow general-purpose, I\nimagine.\n\n+typedef dsa_pointer tidstore_handle;\n\nIt's not clear why we need a typedef here, since here:\n\n+tidstore_attach(dsa_area *area, tidstore_handle handle)\n+{\n+ TidStore *ts;\n+ dsa_pointer control;\n...\n+ control = handle;\n\n...there is a differently-named dsa_pointer variable that just gets the\nfunction parameter.\n\n+/* Return the maximum memory TidStore can use */\n+uint64\n+tidstore_max_memory(TidStore *ts)\n\nsize_t is more suitable for memory.\n\n+ /*\n+ * Since the shared radix tree supports concurrent insert,\n+ * we don't need to acquire the lock.\n+ */\n\nHmm? IIUC, the caller only acquires the lock after returning from here, to\nupdate statistics. Why is it safe to insert with no lock? Am I missing\nsomething?\n\nVACUUM integration:\n\n-#define PARALLEL_VACUUM_KEY_DEAD_ITEMS 2\n+#define PARALLEL_VACUUM_KEY_DSA 2\n\nSeems like unnecessary churn? It is still all about dead items, after all.\nI understand using \"DSA\" for the LWLock, since that matches surrounding\ncode.\n\n+#define HAS_LPDEAD_ITEMS(state) (((state).lpdead_items) > 0)\n\nThis macro helps the patch readability in some places, but I'm not sure it\nhelps readability of the file as a whole. The following is in the patch and\nseems perfectly clear without the macro:\n\n- if (lpdead_items > 0)\n+ if (prunestate->lpdead_items > 0)\n\nAbout shared memory: I have some mild reservations about the naming of the\n\"control object\", which may be in shared memory. Is that an established\nterm? 
(If so, disregard the rest): It seems backwards -- the thing in\nshared memory is the actual tree itself. The thing in backend-local memory\nhas the \"handle\", and that's how we control the tree. I don't have a better\nnaming scheme, though, and might not be that important. (Added a WIP\ncomment)\n\nNow might be a good time to look at earlier XXX comments and come up with a\nplan to address them.\n\nThat's all I have for now.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 23 Jan 2023 18:20:06 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Attached is a rebase to fix conflicts from recent commits.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 23 Jan 2023 19:29:33 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 23, 2023 at 6:00 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> Attached is a rebase to fix conflicts from recent commits.\n\nI have reviewed v22-0022* patch and I have some comments.\n\n1.\n>It also changes to the column names max_dead_tuples and num_dead_tuples and to\n>show the progress information in bytes.\n\nI think this statement needs to be rephrased.\n\n2.\n\n/*\n * vac_tid_reaped() -- is a particular tid deletable?\n *\n * This has the right signature to be an IndexBulkDeleteCallback.\n *\n * Assumes dead_items array is sorted (in ascending TID order).\n */\n\nI think this comment 'Assumes dead_items array is sorted' is not valid anymore.\n\n3.\n\nWe are changing the min value of 'maintenance_work_mem' to 2MB. 
Should\nwe do the same for the 'autovacuum_work_mem'?\n\n4.\n+\n+ /* collected LP_DEAD items including existing LP_DEAD items */\n+ int lpdead_items;\n+ OffsetNumber deadoffsets[MaxHeapTuplesPerPage];\n\nWe are actually collecting dead offsets but the variable name says\n'lpdead_items' instead of something like ndeadoffsets num_deadoffsets.\nAnd the comment is also saying dead items.\n\n5.\n/*\n * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in the\n * vacrel->dead_items array.\n *\n * Caller must have an exclusive buffer lock on the buffer (though a full\n * cleanup lock is also acceptable). vmbuffer must be valid and already have\n * a pin on blkno's visibility map page.\n *\n * index is an offset into the vacrel->dead_items array for the first listed\n * LP_DEAD item on the page. The return value is the first index immediately\n * after all LP_DEAD items for the same page in the array.\n */\n\nThis comment needs to be changed as this is referring to the\n'vacrel->dead_items array' which no longer exists.\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Tue, 24 Jan 2023 11:47:13 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 23, 2023 at 8:20 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> In v21, all of your v20 improvements to the radix tree template and test have been squashed into 0003, with one exception: v20-0010 (recursive freeing of shared mem), which I've attached separately (for flexibility) as v21-0006. I believe one of your earlier patches had a new DSA function for freeing memory more quickly -- was there a problem with that approach? 
I don't recall where that discussion went.\n\nHmm, I don't remember I proposed such a patch, either.\n\nOne idea to address it would be that we pass a shared memory to\nRT_CREATE() and we create a DSA area dedicated to the radix tree in\nplace. We should return the created DSA area along with the radix tree\nso that the caller can use it (e.g., for dsa_get_handle(), dsa_pin(),\nand dsa_pin_mapping() etc). In RT_FREE(), we just detach from the DSA\narea. A downside of this idea would be that one DSA area only for a\nradix tree is always required.\n\nAnother idea would be that we allocate a big enough DSA area and\nquarry small memory for nodes from there. But it would need to\nintroduce another complexity so I prefer to avoid it.\n\nFYI the current design is inspired by dshash.c. In dshash_destory(),\nwe dsa_free() each elements allocated by dshash.c\n\n>\n> > + * XXX: Most functions in this file have two variants for inner nodes and leaf\n> > + * nodes, therefore there are duplication codes. While this sometimes makes the\n> > + * code maintenance tricky, this reduces branch prediction misses when judging\n> > + * whether the node is a inner node of a leaf node.\n> >\n> > This comment seems to be out-of-date since we made it a template.\n>\n> Done in 0020, along with a bunch of other comment editing.\n>\n> > The following macros are defined but not undefined in radixtree.h:\n>\n> Fixed in v21-0018.\n>\n> Also:\n>\n> 0007 makes the value type configurable. 
Some debug functionality still assumes integer type, but I think the rest is agnostic.\n\nradixtree_search_impl.h still assumes that the value type is an\ninteger type as follows:\n\n#ifdef RT_NODE_LEVEL_LEAF\n RT_VALUE_TYPE value = 0;\n\n Assert(RT_NODE_IS_LEAF(node));\n#else\n\nAlso, I think if we make the value type configurable, it's better to\npass the pointer of the value to RT_SET() instead of copying the\nvalues since the value size could be large.\n\n> 0010 turns node4 into node3, as discussed, going from 48 bytes to 32.\n> 0012 adopts the benchmark module to the template, and adds meson support (builds with warnings, but okay because not meant for commit).\n>\n> The rest are cleanups, small refactorings, and more comment rewrites. I've kept them separate for visibility. Next patch can squash them unless there is any discussion.\n\n0008 patch\n\n for (int i = 0; i < RT_SIZE_CLASS_COUNT; i++)\n- fprintf(stderr, \"%s\\tinner_size %zu\\tinner_blocksize\n%zu\\tleaf_size %zu\\tleaf_blocksize %zu\\n\",\n+ fprintf(stderr, \"%s\\tinner_size %zu\\tleaf_size %zu\\t%zu\\n\",\n RT_SIZE_CLASS_INFO[i].name,\n RT_SIZE_CLASS_INFO[i].inner_size,\n- RT_SIZE_CLASS_INFO[i].inner_blocksize,\n- RT_SIZE_CLASS_INFO[i].leaf_size,\n- RT_SIZE_CLASS_INFO[i].leaf_blocksize);\n+ RT_SIZE_CLASS_INFO[i].leaf_size);\n\nThere is additional '%zu' at the end of the format string:\n\n---\n0011 patch\n\n+ * 1. With 5 or more kinds, gcc tends to use a jump table for switch\n+ * statments.\n\ntypo: s/statments/statements/\n\nThe rest look good to me. I'll incorporate these fixes in the next\nversion patch.\n\n>\n> > > uint32 is how we store the block number, so this too small and will wrap around on overflow. int64 seems better.\n> >\n> > Agreed, will fix.\n>\n> Great, but it's now uint64, not int64. All the large counters in struct LVRelState, for example, are signed integers, as the usual practice. 
Unsigned ints are \"usually\" for things like bit patterns and where explicit wraparound is desired. There's probably more that can be done here to change to signed types, but I think it's still a bit early to get to that level of nitpicking. (Soon, I hope :-) )\n\nAgreed. I'll change it in the next version patch.\n\n>\n> > > + * We calculate the maximum bytes for the TidStore in different ways\n> > > + * for non-shared case and shared case. Please refer to the comment\n> > > + * TIDSTORE_MEMORY_DEDUCT for details.\n> > > + */\n> > >\n> > > Maybe the #define and comment should be close to here.\n> >\n> > Will fix.\n>\n> For this, I intended that \"here\" meant \"in or just above the function\".\n>\n> +#define TIDSTORE_LOCAL_MAX_MEMORY_DEDUCT (1024L * 70) /* 70kB */\n> +#define TIDSTORE_SHARED_MAX_MEMORY_RATIO_PO2 (float) 0.75\n> +#define TIDSTORE_SHARED_MAX_MEMORY_RATIO (float) 0.6\n>\n> These symbols are used only once, in tidstore_create(), and are difficult to read. That function has few comments. The symbols have several paragraphs, but they are far away. It might be better for readability to just hard-code numbers in the function, with the explanation about the numbers near where they are used.\n\nAgreed, will fix.\n\n>\n> > > + * Destroy a TidStore, returning all memory. The caller must be certain that\n> > > + * no other backend will attempt to access the TidStore before calling this\n> > > + * function. Other backend must explicitly call tidstore_detach to free up\n> > > + * backend-local memory associated with the TidStore. The backend that calls\n> > > + * tidstore_destroy must not call tidstore_detach.\n> > > + */\n> > > +void\n> > > +tidstore_destroy(TidStore *ts)\n> > >\n> > > If not addressed by next patch, need to phrase comment with FIXME or TODO about making certain.\n> >\n> > Will fix.\n>\n> Did anything change here?\n\nOops, the fix is missed in the patch for some reason. 
I'll fix it.\n\n> There is also this, in the template, which I'm not sure has been addressed:\n>\n> * XXX: Currently we allow only one process to do iteration. Therefore, rt_node_iter\n> * has the local pointers to nodes, rather than RT_PTR_ALLOC.\n> * We need either a safeguard to disallow other processes to begin the iteration\n> * while one process is doing or to allow multiple processes to do the iteration.\n\nIt's not addressed yet. I think adding a safeguard is better for the\nfirst version. A simple solution is to add a flag, say iter_active, to\nallow only one process to enable the iteration. What do you think?\n\n>\n> > > This part only runs \"if (vacrel->nindexes == 0)\", so seems like unneeded complexity. It arises because lazy_scan_prune() populates the tid store even if no index vacuuming happens. Perhaps the caller of lazy_scan_prune() could pass the deadoffsets array, and upon returning, either populate the store or call lazy_vacuum_heap_page(), as needed. It's quite possible I'm missing some detail, so some description of the design choices made would be helpful.\n> >\n> > I agree that we don't need complexity here. I'll try this idea.\n>\n> Keeping the offsets array in the prunestate seems to work out well.\n>\n> Some other quick comments on tid store and vacuum, not comprehensive. Let me know if I've misunderstood something:\n>\n> TID store:\n>\n> + * XXXXXXXX XXXYYYYY YYYYYYYY YYYYYYYY YYYYYYYY YYYuuuu\n> + *\n> + * X = bits used for offset number\n> + * Y = bits used for block number\n> + * u = unused bit\n>\n> I was confused for a while, and I realized the bits are in reverse order from how they are usually pictured (high on left, low on the right).\n\nI borrowed it from ginpostinglist.c but it seems better to write in\nthe common order.\n\n>\n> + * 11 bits enough for the offset number, because MaxHeapTuplesPerPage < 2^11\n> + * on all supported block sizes (TIDSTORE_OFFSET_NBITS). 
We are frugal with\n>\n> + * XXX: if we want to support non-heap table AM that want to use the full\n> + * range of possible offset numbers, we'll need to reconsider\n> + * TIDSTORE_OFFSET_NBITS value.\n>\n> Would it be worth it (or possible) to calculate constants based on compile-time block size? And/or have a fallback for other table AMs? Since this file is in access/common, the intention is to allow general-purpose, I imagine.\n\nI think we can pass the maximum offset numbers to tidstore_create()\nand calculate these values.\n\n>\n> +typedef dsa_pointer tidstore_handle;\n>\n> It's not clear why we need a typedef here, since here:\n>\n> +tidstore_attach(dsa_area *area, tidstore_handle handle)\n> +{\n> + TidStore *ts;\n> + dsa_pointer control;\n> ...\n> + control = handle;\n>\n> ...there is a differently-named dsa_pointer variable that just gets the function parameter.\n\nI guess one reason is to improve compatibility; we can stash the\nactual value of the handle, which could help some cases, for example,\nwhen we need to change the actual value of the handle. dshash.c uses\nthe same idea. Another reason would be to improve readability.\n\n>\n> +/* Return the maximum memory TidStore can use */\n> +uint64\n> +tidstore_max_memory(TidStore *ts)\n>\n> size_t is more suitable for memory.\n\nWIll fix.\n\n>\n> + /*\n> + * Since the shared radix tree supports concurrent insert,\n> + * we don't need to acquire the lock.\n> + */\n>\n> Hmm? IIUC, the caller only acquires the lock after returning from here, to update statistics. Why is it safe to insert with no lock? Am I missing something?\n\nYou're right. I was missing something. The lock should be taken before\nadding key-value pairs.\n\n>\n> VACUUM integration:\n>\n> -#define PARALLEL_VACUUM_KEY_DEAD_ITEMS 2\n> +#define PARALLEL_VACUUM_KEY_DSA 2\n>\n> Seems like unnecessary churn? It is still all about dead items, after all. 
I understand using \"DSA\" for the LWLock, since that matches surrounding code.\n\nAgreed, will remove.\n\n>\n> +#define HAS_LPDEAD_ITEMS(state) (((state).lpdead_items) > 0)\n>\n> This macro helps the patch readability in some places, but I'm not sure it helps readability of the file as a whole. The following is in the patch and seems perfectly clear without the macro:\n>\n> - if (lpdead_items > 0)\n> + if (prunestate->lpdead_items > 0)\n\nWill remove the macro.\n\n>\n> About shared memory: I have some mild reservations about the naming of the \"control object\", which may be in shared memory. Is that an established term? (If so, disregard the rest): It seems backwards -- the thing in shared memory is the actual tree itself. The thing in backend-local memory has the \"handle\", and that's how we control the tree. I don't have a better naming scheme, though, and might not be that important. (Added a WIP comment)\n\nThat seems a valid concern. I borrowed the \"control object\" from\ndshash.c but it supports only shared cases. The fact that the radix\ntree supports both local and shared seems to introduce this confusion.\nI came up with other names such as RT_RADIX_TREE_CORE or\nRT_RADIX_TREE_ROOT but not sure these are better than the current\none.\n\n>\n> Now might be a good time to look at earlier XXX comments and come up with a plan to address them.\n\nAgreed.\n\nOther XXX comments that are not mentioned yet are:\n\n+ /* XXX: memory context support */\n+ tree = (RT_RADIX_TREE *) palloc0(sizeof(RT_RADIX_TREE));\n\nI'm not sure we really need memory context support for RT_ATTACH()\nsince in the shared case, we allocate backend-local memory only for\nRT_RADIX_TREE.\n\n---\n+RT_SCOPE uint64\n+RT_MEMORY_USAGE(RT_RADIX_TREE *tree)\n+{\n+ // XXX is this necessary?\n+ Size total = sizeof(RT_RADIX_TREE);\n\nRegarding this, I followed intset_memory_usage(). 
But in the radix\ntree, RT_RADIX_TREE is very small so probably we can ignore it.\n\n---\n+/* XXX For display, assumes value type is numeric */\n+static void\n+RT_DUMP_NODE(RT_PTR_LOCAL node, int level, bool recurse)\n\nI think we can display values in hex encoded format but given the\nvalue could be large, we don't necessarily need to display actual\nvalues. Displaying the tree structure and chunks would be helpful for\ndebugging the radix tree.\n\n---\nThere is no XXX comment but I'll try to add lock support in the next\nversion patch.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 25 Jan 2023 10:42:05 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 25, 2023 at 8:42 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Mon, Jan 23, 2023 at 8:20 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> > >\n> > > On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> >\n> > In v21, all of your v20 improvements to the radix tree template and\ntest have been squashed into 0003, with one exception: v20-0010 (recursive\nfreeing of shared mem), which I've attached separately (for flexibility) as\nv21-0006. I believe one of your earlier patches had a new DSA function for\nfreeing memory more quickly -- was there a problem with that approach? I\ndon't recall where that discussion went.\n>\n> Hmm, I don't remember I proposed such a patch, either.\n\nI went looking, and it turns out I remembered wrong, sorry.\n\n> One idea to address it would be that we pass a shared memory to\n> RT_CREATE() and we create a DSA area dedicated to the radix tree in\n> place. 
We should return the created DSA area along with the radix tree\n> so that the caller can use it (e.g., for dsa_get_handle(), dsa_pin(),\n> and dsa_pin_mapping() etc). In RT_FREE(), we just detach from the DSA\n> area. A downside of this idea would be that one DSA area only for a\n> radix tree is always required.\n>\n> Another idea would be that we allocate a big enough DSA area and\n> quarry small memory for nodes from there. But it would need to\n> introduce another complexity so I prefer to avoid it.\n>\n> FYI the current design is inspired by dshash.c. In dshash_destory(),\n> we dsa_free() each elements allocated by dshash.c\n\nOkay, thanks for the info.\n\n> > 0007 makes the value type configurable. Some debug functionality still\nassumes integer type, but I think the rest is agnostic.\n>\n> radixtree_search_impl.h still assumes that the value type is an\n> integer type as follows:\n>\n> #ifdef RT_NODE_LEVEL_LEAF\n> RT_VALUE_TYPE value = 0;\n>\n> Assert(RT_NODE_IS_LEAF(node));\n> #else\n>\n> Also, I think if we make the value type configurable, it's better to\n> pass the pointer of the value to RT_SET() instead of copying the\n> values since the value size could be large.\n\nThanks, I will remove the assignment and look into pass-by-reference.\n\n> Oops, the fix is missed in the patch for some reason. I'll fix it.\n>\n> > There is also this, in the template, which I'm not sure has been\naddressed:\n> >\n> > * XXX: Currently we allow only one process to do iteration. Therefore,\nrt_node_iter\n> > * has the local pointers to nodes, rather than RT_PTR_ALLOC.\n> > * We need either a safeguard to disallow other processes to begin the\niteration\n> > * while one process is doing or to allow multiple processes to do the\niteration.\n>\n> It's not addressed yet. I think adding a safeguard is better for the\n> first version. A simple solution is to add a flag, say iter_active, to\n> allow only one process to enable the iteration. 
What do you think?\n\nI don't quite have enough info to offer an opinion, but this sounds like a\ndifferent form of locking. I'm sure it's come up before, but could you\ndescribe why iteration is different from other operations, regarding\nconcurrency?\n\n> > Would it be worth it (or possible) to calculate constants based on\ncompile-time block size? And/or have a fallback for other table AMs? Since\nthis file is in access/common, the intention is to allow general-purpose, I\nimagine.\n>\n> I think we can pass the maximum offset numbers to tidstore_create()\n> and calculate these values.\n\nThat would work easily for vacuumlazy.c, since it's in the \"heap\" subdir so\nwe know the max possible offset. I haven't looked at vacuumparallel.c, but\nI can tell it is not in a heap-specific directory, so I don't know how easy\nthat would be to pass along the right value.\n\n> > About shared memory: I have some mild reservations about the naming of\nthe \"control object\", which may be in shared memory. Is that an established\nterm? (If so, disregard the rest): It seems backwards -- the thing in\nshared memory is the actual tree itself. The thing in backend-local memory\nhas the \"handle\", and that's how we control the tree. I don't have a better\nnaming scheme, though, and might not be that important. (Added a WIP\ncomment)\n>\n> That seems a valid concern. I borrowed the \"control object\" from\n> dshash.c but it supports only shared cases. 
The fact that the radix\n> tree supports both local and shared seems to introduce this confusion.\n> I came up with other names such as RT_RADIX_TREE_CORE or\n> RT_RADIX_TREE_ROOT but not sure these are better than the current\n> one.\n\nOkay, if dshash uses it, we have some precedent.\n\n> > Now might be a good time to look at earlier XXX comments and come up\nwith a plan to address them.\n>\n> Agreed.\n>\n> Other XXX comments that are not mentioned yet are:\n>\n> + /* XXX: memory context support */\n> + tree = (RT_RADIX_TREE *) palloc0(sizeof(RT_RADIX_TREE));\n>\n> I'm not sure we really need memory context support for RT_ATTACH()\n> since in the shared case, we allocate backend-local memory only for\n> RT_RADIX_TREE.\n\nOkay, we can remove this.\n\n> ---\n> +RT_SCOPE uint64\n> +RT_MEMORY_USAGE(RT_RADIX_TREE *tree)\n> +{\n> + // XXX is this necessary?\n> + Size total = sizeof(RT_RADIX_TREE);\n>\n> Regarding this, I followed intset_memory_usage(). But in the radix\n> tree, RT_RADIX_TREE is very small so probably we can ignore it.\n\nThat was more a note to myself that I forgot about, so here is my\nreasoning: In the shared case, we just overwrite that initial total, but\nfor the local case we add to it. A future reader could think this is\ninconsistent and needs to be fixed. Since we deduct from the guc limit to\nguard against worst-case re-allocation, and that deduction is not very\nprecise (nor needs to be), I agree we should just forget about tiny sizes\nlike this in both cases.\n\n> ---\n> +/* XXX For display, assumes value type is numeric */\n> +static void\n> +RT_DUMP_NODE(RT_PTR_LOCAL node, int level, bool recurse)\n>\n> I think we can display values in hex encoded format but given the\n> value could be large, we don't necessarily need to display actual\n> values. 
Displaying the tree structure and chunks would be helpful for\n> debugging the radix tree.\n\nOkay, I can try that unless you do it first.\n\n> There is no XXX comment but I'll try to add lock support in the next\n> version patch.\n\nSince there were calls to LWLockAcquire/Release in the last version, I'm a\nbit confused by this. Perhaps for the next patch, the email should contain\na few sentences describing how locking is intended to work, including for\niteration.\n\nHmm, I wonder if we need to use the isolation tester. It's both a blessing\nand a curse that the first client of this data structure is tid lookup.\nIt's a blessing because it doesn't present a highly-concurrent workload\nmixing reads and writes and so simple locking is adequate. It's a curse\nbecause to test locking and have any chance of finding bugs, we can't rely\non vacuum to tell us that because (as you've said) it might very well work\nfine with no locking at all. So we must come up with test cases ourselves.\n\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Jan 25, 2023 at 8:42 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Mon, Jan 23, 2023 at 8:20 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> >> > On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > >> > > On Mon, Jan 16, 2023 at 2:02 PM John Naylor> > > <john.naylor@enterprisedb.com> wrote:> >> > In v21, all of your v20 improvements to the radix tree template and test have been squashed into 0003, with one exception: v20-0010 (recursive freeing of shared mem), which I've attached separately (for flexibility) as v21-0006. I believe one of your earlier patches had a new DSA function for freeing memory more quickly -- was there a problem with that approach? 
I don't recall where that discussion went.>> Hmm, I don't remember I proposed such a patch, either.I went looking, and it turns out I remembered wrong, sorry.> One idea to address it would be that we pass a shared memory to> RT_CREATE() and we create a DSA area dedicated to the radix tree in> place. We should return the created DSA area along with the radix tree> so that the caller can use it (e.g., for dsa_get_handle(), dsa_pin(),> and dsa_pin_mapping() etc). In RT_FREE(), we just detach from the DSA> area. A downside of this idea would be that one DSA area only for a> radix tree is always required.>> Another idea would be that we allocate a big enough DSA area and> quarry small memory for nodes from there. But it would need to> introduce another complexity so I prefer to avoid it.>> FYI the current design is inspired by dshash.c. In dshash_destory(),> we dsa_free() each elements allocated by dshash.cOkay, thanks for the info.> > 0007 makes the value type configurable. Some debug functionality still assumes integer type, but I think the rest is agnostic.>> radixtree_search_impl.h still assumes that the value type is an> integer type as follows:>> #ifdef RT_NODE_LEVEL_LEAF>     RT_VALUE_TYPE       value = 0;>>     Assert(RT_NODE_IS_LEAF(node));> #else>> Also, I think if we make the value type configurable, it's better to> pass the pointer of the value to RT_SET() instead of copying the> values since the value size could be large.Thanks, I will remove the assignment and look into pass-by-reference.> Oops, the fix is missed in the patch for some reason. I'll fix it.>> > There is also this, in the template, which I'm not sure has been addressed:> >> >  * XXX: Currently we allow only one process to do iteration. 
Therefore, rt_node_iter> >  * has the local pointers to nodes, rather than RT_PTR_ALLOC.> >  * We need either a safeguard to disallow other processes to begin the iteration> >  * while one process is doing or to allow multiple processes to do the iteration.>> It's not addressed yet. I think adding a safeguard is better for the> first version. A simple solution is to add a flag, say iter_active, to> allow only one process to enable the iteration. What do you think?I don't quite have enough info to offer an opinion, but this sounds like a different form of locking. I'm sure it's come up before, but could you describe why iteration is different from other operations, regarding concurrency?> > Would it be worth it (or possible) to calculate constants based on compile-time block size? And/or have a fallback for other table AMs? Since this file is in access/common, the intention is to allow general-purpose, I imagine.>> I think we can pass the maximum offset numbers to tidstore_create()> and calculate these values.That would work easily for vacuumlazy.c, since it's in the \"heap\" subdir so we know the max possible offset. I haven't looked at vacuumparallel.c, but I can tell it is not in a heap-specific directory, so I don't know how easy that would be to pass along the right value.> > About shared memory: I have some mild reservations about the naming of the \"control object\", which may be in shared memory. Is that an established term? (If so, disregard the rest): It seems backwards -- the thing in shared memory is the actual tree itself. The thing in backend-local memory has the \"handle\", and that's how we control the tree. I don't have a better naming scheme, though, and might not be that important. (Added a WIP comment)>> That seems a valid concern. I borrowed the \"control object\" from> dshash.c but it supports only shared cases. 
The fact that the radix> tree supports both local and shared seems to introduce this confusion.> I came up with other names such as RT_RADIX_TREE_CORE or> RT_RADIX_TREE_ROOT  but not sure these are better than the current> one.Okay, if dshash uses it, we have some precedent.> > Now might be a good time to look at earlier XXX comments and come up with a plan to address them.>> Agreed.>> Other XXX comments that are not mentioned yet are:>> +   /* XXX: memory context support */> +   tree = (RT_RADIX_TREE *) palloc0(sizeof(RT_RADIX_TREE));>> I'm not sure we really need memory context support for RT_ATTACH()> since in the shared case, we allocate backend-local memory only for> RT_RADIX_TREE.Okay, we can remove this.> ---> +RT_SCOPE uint64> +RT_MEMORY_USAGE(RT_RADIX_TREE *tree)> +{> +   // XXX is this necessary?> +   Size        total = sizeof(RT_RADIX_TREE);>> Regarding this, I followed intset_memory_usage(). But in the radix> tree, RT_RADIX_TREE is very small so probably we can ignore it.That was more a note to myself that I forgot about, so here is my reasoning: In the shared case, we just overwrite that initial total, but for the local case we add to it. A future reader could think this is inconsistent and needs to be fixed. Since we deduct from the guc limit to guard against worst-case re-allocation, and that deduction is not very precise (nor needs to be), I agree we should just forget about tiny sizes like this in both cases.> ---> +/* XXX For display, assumes value type is numeric */> +static void> +RT_DUMP_NODE(RT_PTR_LOCAL node, int level, bool recurse)>> I think we can display values in hex encoded format but given the> value could be large, we don't necessarily need to display actual> values. 
Displaying the tree structure and chunks would be helpful for> debugging the radix tree.Okay, I can try that unless you do it first.> There is no XXX comment but I'll try to add lock support in the next> version patch.Since there were calls to LWLockAcquire/Release in the last version, I'm a bit confused by this. Perhaps for the next patch, the email should contain a few sentences describing how locking is intended to work, including for iteration. Hmm, I wonder if we need to use the isolation tester. It's both a blessing and a curse that the first client of this data structure is tid lookup. It's a blessing because it doesn't present a highly-concurrent workload mixing reads and writes and so simple locking is adequate. It's a curse because to test locking and have any chance of finding bugs, we can't rely on vacuum to tell us that because (as you've said) it might very well work fine with no locking at all. So we must come up with test cases ourselves.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 26 Jan 2023 13:54:43 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 24, 2023 at 1:17 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n>\n> On Mon, Jan 23, 2023 at 6:00 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > Attached is a rebase to fix conflicts from recent commits.\n>\n> I have reviewed v22-0022* patch and I have some comments.\n>\n> 1.\n> >It also changes to the column names max_dead_tuples and num_dead_tuples\nand to\n> >show the progress information in bytes.\n>\n> I think this statement needs to be rephrased.\n\nCould you be more specific?\n\n> 3.\n>\n> We are changing the min value of 'maintenance_work_mem' to 2MB. Should\n> we do the same for the 'autovacuum_work_mem'?\n\nYes, we should change that, too. 
We've discussed previously that\nautovacuum_work_mem is possibly rendered unnecessary by this work, but we\nagreed that that should be a separate thread. And needs additional testing\nto verify.\n\nI agree with your other comments.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Tue, Jan 24, 2023 at 1:17 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:>> On Mon, Jan 23, 2023 at 6:00 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> >> > Attached is a rebase to fix conflicts from recent commits.>> I have reviewed v22-0022* patch and I have some comments.>> 1.> >It also changes to the column names max_dead_tuples and num_dead_tuples and to> >show the progress information in bytes.>> I think this statement needs to be rephrased.Could you be more specific?> 3.>> We are changing the min value of 'maintenance_work_mem' to 2MB. Should> we do the same for the 'autovacuum_work_mem'?Yes, we should change that, too. We've discussed previously that autovacuum_work_mem is possibly rendered unnecessary by this work, but we agreed that that should be a separate thread. 
And needs additional testing to verify.I agree with your other comments.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 26 Jan 2023 14:08:52 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 26, 2023 at 3:54 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Wed, Jan 25, 2023 at 8:42 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Jan 23, 2023 at 8:20 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > On Mon, Jan 16, 2023 at 3:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > >\n> > > > On Mon, Jan 16, 2023 at 2:02 PM John Naylor\n> > > > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > In v21, all of your v20 improvements to the radix tree template and test have been squashed into 0003, with one exception: v20-0010 (recursive freeing of shared mem), which I've attached separately (for flexibility) as v21-0006. I believe one of your earlier patches had a new DSA function for freeing memory more quickly -- was there a problem with that approach? I don't recall where that discussion went.\n> >\n> > Hmm, I don't remember I proposed such a patch, either.\n>\n> I went looking, and it turns out I remembered wrong, sorry.\n>\n> > One idea to address it would be that we pass a shared memory to\n> > RT_CREATE() and we create a DSA area dedicated to the radix tree in\n> > place. We should return the created DSA area along with the radix tree\n> > so that the caller can use it (e.g., for dsa_get_handle(), dsa_pin(),\n> > and dsa_pin_mapping() etc). In RT_FREE(), we just detach from the DSA\n> > area. A downside of this idea would be that one DSA area only for a\n> > radix tree is always required.\n> >\n> > Another idea would be that we allocate a big enough DSA area and\n> > quarry small memory for nodes from there. 
But it would need to\n> > introduce another complexity so I prefer to avoid it.\n> >\n> > FYI the current design is inspired by dshash.c. In dshash_destory(),\n> > we dsa_free() each elements allocated by dshash.c\n>\n> Okay, thanks for the info.\n>\n> > > 0007 makes the value type configurable. Some debug functionality still assumes integer type, but I think the rest is agnostic.\n> >\n> > radixtree_search_impl.h still assumes that the value type is an\n> > integer type as follows:\n> >\n> > #ifdef RT_NODE_LEVEL_LEAF\n> > RT_VALUE_TYPE value = 0;\n> >\n> > Assert(RT_NODE_IS_LEAF(node));\n> > #else\n> >\n> > Also, I think if we make the value type configurable, it's better to\n> > pass the pointer of the value to RT_SET() instead of copying the\n> > values since the value size could be large.\n>\n> Thanks, I will remove the assignment and look into pass-by-reference.\n>\n> > Oops, the fix is missed in the patch for some reason. I'll fix it.\n> >\n> > > There is also this, in the template, which I'm not sure has been addressed:\n> > >\n> > > * XXX: Currently we allow only one process to do iteration. Therefore, rt_node_iter\n> > > * has the local pointers to nodes, rather than RT_PTR_ALLOC.\n> > > * We need either a safeguard to disallow other processes to begin the iteration\n> > > * while one process is doing or to allow multiple processes to do the iteration.\n> >\n> > It's not addressed yet. I think adding a safeguard is better for the\n> > first version. A simple solution is to add a flag, say iter_active, to\n> > allow only one process to enable the iteration. What do you think?\n>\n> I don't quite have enough info to offer an opinion, but this sounds like a different form of locking. 
I'm sure it's come up before, but could you describe why iteration is different from other operations, regarding concurrency?\n\nI think that we need to prevent concurrent updates (RT_SET() and\nRT_DELETE()) during the iteration to get the consistent result through\nthe whole iteration operation. Unlike other operations such as\nRT_SET(), we cannot expect that a job doing something for each\nkey-value pair in the radix tree completes in a short time, so we\ncannot keep holding the radix tree lock until the end of the\niteration. So the idea is that we set iter_active to true (with the\nlock in exclusive mode), and prevent concurrent updates when the flag\nis true.\n\n>\n> > > Would it be worth it (or possible) to calculate constants based on compile-time block size? And/or have a fallback for other table AMs? Since this file is in access/common, the intention is to allow general-purpose, I imagine.\n> >\n> > I think we can pass the maximum offset numbers to tidstore_create()\n> > and calculate these values.\n>\n> That would work easily for vacuumlazy.c, since it's in the \"heap\" subdir so we know the max possible offset. I haven't looked at vacuumparallel.c, but I can tell it is not in a heap-specific directory, so I don't know how easy that would be to pass along the right value.\n\nI think the user (e.g, vacuumlazy.c) can pass the maximum offset\nnumber to the parallel vacuum.\n\n>\n> > > About shared memory: I have some mild reservations about the naming of the \"control object\", which may be in shared memory. Is that an established term? (If so, disregard the rest): It seems backwards -- the thing in shared memory is the actual tree itself. The thing in backend-local memory has the \"handle\", and that's how we control the tree. I don't have a better naming scheme, though, and might not be that important. (Added a WIP comment)\n> >\n> > That seems a valid concern. I borrowed the \"control object\" from\n> > dshash.c but it supports only shared cases. 
The fact that the radix\n> > tree supports both local and shared seems to introduce this confusion.\n> > I came up with other names such as RT_RADIX_TREE_CORE or\n> > RT_RADIX_TREE_ROOT but not sure these are better than the current\n> > one.\n>\n> Okay, if dshash uses it, we have some precedent.\n>\n> > > Now might be a good time to look at earlier XXX comments and come up with a plan to address them.\n> >\n> > Agreed.\n> >\n> > Other XXX comments that are not mentioned yet are:\n> >\n> > + /* XXX: memory context support */\n> > + tree = (RT_RADIX_TREE *) palloc0(sizeof(RT_RADIX_TREE));\n> >\n> > I'm not sure we really need memory context support for RT_ATTACH()\n> > since in the shared case, we allocate backend-local memory only for\n> > RT_RADIX_TREE.\n>\n> Okay, we can remove this.\n>\n> > ---\n> > +RT_SCOPE uint64\n> > +RT_MEMORY_USAGE(RT_RADIX_TREE *tree)\n> > +{\n> > + // XXX is this necessary?\n> > + Size total = sizeof(RT_RADIX_TREE);\n> >\n> > Regarding this, I followed intset_memory_usage(). But in the radix\n> > tree, RT_RADIX_TREE is very small so probably we can ignore it.\n>\n> That was more a note to myself that I forgot about, so here is my reasoning: In the shared case, we just overwrite that initial total, but for the local case we add to it. A future reader could think this is inconsistent and needs to be fixed. Since we deduct from the guc limit to guard against worst-case re-allocation, and that deduction is not very precise (nor needs to be), I agree we should just forget about tiny sizes like this in both cases.\n\nThanks for your explanation, agreed.\n\n>\n> > ---\n> > +/* XXX For display, assumes value type is numeric */\n> > +static void\n> > +RT_DUMP_NODE(RT_PTR_LOCAL node, int level, bool recurse)\n> >\n> > I think we can display values in hex encoded format but given the\n> > value could be large, we don't necessarily need to display actual\n> > values. 
Displaying the tree structure and chunks would be helpful for\n> > debugging the radix tree.\n>\n> Okay, I can try that unless you do it first.\n>\n> > There is no XXX comment but I'll try to add lock support in the next\n> > version patch.\n>\n> Since there were calls to LWLockAcquire/Release in the last version, I'm a bit confused by this. Perhaps for the next patch, the email should contain a few sentences describing how locking is intended to work, including for iteration.\n\nThe lock I'm thinking of adding is a simple readers-writer lock. This\nlock is used for concurrent radix tree operations except for the\niteration. For operations concurrent to the iteration, I used a flag\nfor the reason I mentioned above.\n\n>\n> Hmm, I wonder if we need to use the isolation tester. It's both a blessing and a curse that the first client of this data structure is tid lookup. It's a blessing because it doesn't present a highly-concurrent workload mixing reads and writes and so simple locking is adequate. It's a curse because to test locking and have any chance of finding bugs, we can't rely on vacuum to tell us that because (as you've said) it might very well work fine with no locking at all. So we must come up with test cases ourselves.\n\nUsing the isolation tester to test locking seems like a good idea. We\ncan include it in test_radixtree. But given that the locking in the\nradix tree is very simple, the test case would be very simple. It may\nbe controversial whether it's worth adding such testing by adding both\nthe new test module and test cases.\n\nI'm working on the fixes I mentioned in the previous email and going\nto share the updated patch today. 
Please wait to do these fixes if\nyou're okay.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 26 Jan 2023 17:32:52 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 26, 2023 at 5:32 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I'm working on the fixes I mentioned in the previous email and going\n> to share the updated patch today. Please wait to do these fixes if\n> you're okay.\n>\n\nI've attached updated version patches. As we agreed I've merged your\nchanges in v22 into the main (0003) patch. But I still kept the patch\nof recursively freeing nodes separate as we might need more\ndiscussion. In v23 I attached, 0006 through 0016 patches are fixes and\nimprovements for the radix tree. I've incorporated all comments I got\nunless I'm missing something.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 26 Jan 2023 23:47:40 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 26, 2023 at 3:33 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Thu, Jan 26, 2023 at 3:54 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> I think that we need to prevent concurrent updates (RT_SET() and\n> RT_DELETE()) during the iteration to get the consistent result through\n> the whole iteration operation. Unlike other operations such as\n> RT_SET(), we cannot expect that a job doing something for each\n> key-value pair in the radix tree completes in a short time, so we\n> cannot keep holding the radix tree lock until the end of the\n> iteration.\n\nThis sounds like a performance concern, rather than a correctness concern,\nis that right? 
If so, I don't think we should worry too much about\noptimizing simple locking, because it will *never* be fast enough for\nhighly-concurrent read-write workloads anyway, and anyone interested in\nthose workloads will have to completely replace the locking scheme,\npossibly using one of the ideas in the last ART paper you mentioned.\n\nThe first implementation should be simple, easy to test/verify, easy to\nunderstand, and easy to replace. As much as possible anyway.\n\n> So the idea is that we set iter_active to true (with the\n> lock in exclusive mode), and prevent concurrent updates when the flag\n> is true.\n\n...by throwing elog(ERROR)? I'm not so sure users of this API would prefer\nthat to waiting.\n\n> > Since there were calls to LWLockAcquire/Release in the last version,\nI'm a bit confused by this. Perhaps for the next patch, the email should\ncontain a few sentences describing how locking is intended to work,\nincluding for iteration.\n>\n> The lock I'm thinking of adding is a simple readers-writer lock. This\n> lock is used for concurrent radix tree operations except for the\n> iteration. For operations concurrent to the iteration, I used a flag\n> for the reason I mentioned above.\n\nThis doesn't tell me anything -- we already agreed on \"simple reader-writer\nlock\", months ago I believe. And I only have a vague idea about the\ntradeoffs made regarding iteration.\n\n+ * WIP: describe about how locking works.\n\nA first draft of what is intended for this WIP would be a good start. This\nWIP is from v23-0016, which contains no comments and a one-line commit\nmessage. I'd rather not try closely studying that patch (or how it works\nwith 0011) until I have a clearer understanding of what requirements are\nassumed, what trade-offs are considered, and how it should be tested.\n\n[thinks some more...] Is there an API-level assumption that hasn't been\nspelled out? 
Would it help to have a parameter for whether the iteration\nfunction wants to reserve the privilege to perform writes? It could take\nthe appropriate lock at the start, and there could then be multiple\nread-only iterators, but only one read/write iterator. Note, I'm just\nguessing here, and I don't want to make things more difficult for future\nimprovements.\n\n> > Hmm, I wonder if we need to use the isolation tester. It's both a\nblessing and a curse that the first client of this data structure is tid\nlookup. It's a blessing because it doesn't present a highly-concurrent\nworkload mixing reads and writes and so simple locking is adequate. It's a\ncurse because to test locking and have any chance of finding bugs, we can't\nrely on vacuum to tell us that because (as you've said) it might very well\nwork fine with no locking at all. So we must come up with test cases\nourselves.\n>\n> Using the isolation tester to test locking seems like a good idea. We\n> can include it in test_radixtree. But given that the locking in the\n> radix tree is very simple, the test case would be very simple. It may\n> be controversial whether it's worth adding such testing by adding both\n> the new test module and test cases.\n\nI mean that the isolation tester (or something else) would contain test\ncases. I didn't mean to imply redundant testing.\n\n> I think the user (e.g, vacuumlazy.c) can pass the maximum offset\n> number to the parallel vacuum.\n\nOkay, sounds good.\n\nMost of v23's cleanups/fixes in the radix template look good to me,\nalthough I didn't read the debugging code very closely. There is one\nexception:\n\n0006 - I've never heard of memset'ing a variable to avoid \"variable unused\"\ncompiler warnings, and it seems strange. It turns out we don't actually\nneed this variable in the first place. The attached .txt patch removes the\nlocal variable and just writes to the passed pointer. 
This required callers\nto initialize a couple of their own variables, but only child pointers, at\nleast on gcc 12. And I will work later on making \"value\" in the public API\na pointer.\n\n0017 - I haven't taken a close look at the new changes, but I did notice\nthis some time ago:\n\n+ if (TidStoreIsShared(ts))\n+ return sizeof(TidStore) + shared_rt_memory_usage(ts->tree.shared);\n+ else\n+ return sizeof(TidStore) + sizeof(TidStore) +\n+ local_rt_memory_usage(ts->tree.local);\n\nThere is repetition in the else branch.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Sat, 28 Jan 2023 18:32:50 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sat, Jan 28, 2023 at 8:33 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Jan 26, 2023 at 3:33 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Jan 26, 2023 at 3:54 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> > I think that we need to prevent concurrent updates (RT_SET() and\n> > RT_DELETE()) during the iteration to get the consistent result through\n> > the whole iteration operation. Unlike other operations such as\n> > RT_SET(), we cannot expect that a job doing something for each\n> > key-value pair in the radix tree completes in a short time, so we\n> > cannot keep holding the radix tree lock until the end of the\n> > iteration.\n>\n> This sounds like a performance concern, rather than a correctness concern, is that right? 
If so, I don't think we should worry too much about optimizing simple locking, because it will *never* be fast enough for highly-concurrent read-write workloads anyway, and anyone interested in those workloads will have to completely replace the locking scheme, possibly using one of the ideas in the last ART paper you mentioned.\n>\n> The first implementation should be simple, easy to test/verify, easy to understand, and easy to replace. As much as possible anyway.\n\nYes, but if a concurrent writer waits for another process to finish\nthe iteration, it ends up waiting on a lwlock, which is not\ninterruptible.\n\n>\n> > So the idea is that we set iter_active to true (with the\n> > lock in exclusive mode), and prevent concurrent updates when the flag\n> > is true.\n>\n> ...by throwing elog(ERROR)? I'm not so sure users of this API would prefer that to waiting.\n\nRight. I think if we want to wait rather than an ERROR, the waiter\nshould wait in an interruptible way, for example, a condition\nvariable. I did a simpler way in the v22 patch.\n\n...but looking at dshash.c, dshash_seq_next() seems to return an entry\nwhile holding a lwlock on the partition. My assumption might be wrong.\n\n>\n> > > Since there were calls to LWLockAcquire/Release in the last version, I'm a bit confused by this. Perhaps for the next patch, the email should contain a few sentences describing how locking is intended to work, including for iteration.\n> >\n> > The lock I'm thinking of adding is a simple readers-writer lock. This\n> > lock is used for concurrent radix tree operations except for the\n> > iteration. For operations concurrent to the iteration, I used a flag\n> > for the reason I mentioned above.\n>\n> This doesn't tell me anything -- we already agreed on \"simple reader-writer lock\", months ago I believe. 
And I only have a vague idea about the tradeoffs made regarding iteration.\n>\n> + * WIP: describe about how locking works.\n>\n> A first draft of what is intended for this WIP would be a good start. This WIP is from v23-0016, which contains no comments and a one-line commit message. I'd rather not try closely studying that patch (or how it works with 0011) until I have a clearer understanding of what requirements are assumed, what trade-offs are considered, and how it should be tested.\n>\n> [thinks some more...] Is there an API-level assumption that hasn't been spelled out? Would it help to have a parameter for whether the iteration function wants to reserve the privilege to perform writes? It could take the appropriate lock at the start, and there could then be multiple read-only iterators, but only one read/write iterator. Note, I'm just guessing here, and I don't want to make things more difficult for future improvements.\n\nSeems a good idea. Given the use case for parallel heap vacuum, it\nwould be a good idea to support having multiple read-only writers. The\niteration of the v22 is read-only, so if we want to support read-write\niterator, we would need to support a function that modifies the\ncurrent key-value returned by the iteration.\n\n>\n> > > Hmm, I wonder if we need to use the isolation tester. It's both a blessing and a curse that the first client of this data structure is tid lookup. It's a blessing because it doesn't present a highly-concurrent workload mixing reads and writes and so simple locking is adequate. It's a curse because to test locking and have any chance of finding bugs, we can't rely on vacuum to tell us that because (as you've said) it might very well work fine with no locking at all. So we must come up with test cases ourselves.\n> >\n> > Using the isolation tester to test locking seems like a good idea. We\n> > can include it in test_radixtree. 
But given that the locking in the\n> > radix tree is very simple, the test case would be very simple. It may\n> > be controversial whether it's worth adding such testing by adding both\n> > the new test module and test cases.\n>\n> I mean that the isolation tester (or something else) would contain test cases. I didn't mean to imply redundant testing.\n\nOkay, understood.\n\n>\n> > I think the user (e.g, vacuumlazy.c) can pass the maximum offset\n> > number to the parallel vacuum.\n>\n> Okay, sounds good.\n>\n> Most of v23's cleanups/fixes in the radix template look good to me, although I didn't read the debugging code very closely. There is one exception:\n>\n> 0006 - I've never heard of memset'ing a variable to avoid \"variable unused\" compiler warnings, and it seems strange. It turns out we don't actually need this variable in the first place. The attached .txt patch removes the local variable and just writes to the passed pointer. This required callers to initialize a couple of their own variables, but only child pointers, at least on gcc 12.\n\nAgreed with the attached patch.\n\n> And I will work later on making \"value\" in the public API a pointer.\n\nThanks!\n\n>\n> 0017 - I haven't taken a close look at the new changes, but I did notice this some time ago:\n>\n> + if (TidStoreIsShared(ts))\n> + return sizeof(TidStore) + shared_rt_memory_usage(ts->tree.shared);\n> + else\n> + return sizeof(TidStore) + sizeof(TidStore) +\n> + local_rt_memory_usage(ts->tree.local);\n>\n> There is repetition in the else branch.\n\nAgreed, will remove.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Sun, 29 Jan 2023 23:49:56 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 26, 2023 at 12:39 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Tue, Jan 24, 2023 at 1:17 PM 
Dilip Kumar <dilipbalaut@gmail.com> wrote:\n> >\n> > On Mon, Jan 23, 2023 at 6:00 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > Attached is a rebase to fix conflicts from recent commits.\n> >\n> > I have reviewed v22-0022* patch and I have some comments.\n> >\n> > 1.\n> > >It also changes to the column names max_dead_tuples and num_dead_tuples and to\n> > >show the progress information in bytes.\n> >\n> > I think this statement needs to be rephrased.\n>\n> Could you be more specific?\n\nI mean the below statement in the commit message doesn't look\ngrammatically correct to me.\n\n\"It also changes to the column names max_dead_tuples and\nnum_dead_tuples and to show the progress information in bytes.\"\n\n-- \nRegards,\nDilip Kumar\nEnterpriseDB: http://www.enterprisedb.com\n\n\n", "msg_date": "Mon, 30 Jan 2023 09:38:17 +0530", "msg_from": "Dilip Kumar <dilipbalaut@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sun, Jan 29, 2023 at 9:50 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Sat, Jan 28, 2023 at 8:33 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> > The first implementation should be simple, easy to test/verify, easy to\nunderstand, and easy to replace. As much as possible anyway.\n>\n> Yes, but if a concurrent writer waits for another process to finish\n> the iteration, it ends up waiting on a lwlock, which is not\n> interruptible.\n>\n> >\n> > > So the idea is that we set iter_active to true (with the\n> > > lock in exclusive mode), and prevent concurrent updates when the flag\n> > > is true.\n> >\n> > ...by throwing elog(ERROR)? I'm not so sure users of this API would\nprefer that to waiting.\n>\n> Right. I think if we want to wait rather than an ERROR, the waiter\n> should wait in an interruptible way, for example, a condition\n> variable. 
I did a simpler way in the v22 patch.\n>\n> ...but looking at dshash.c, dshash_seq_next() seems to return an entry\n> while holding a lwlock on the partition. My assumption might be wrong.\n\nUsing partitions there makes holding a lock less painful on average, I\nimagine, but I don't know the details there.\n\nIf we make it clear that the first committed version is not (yet) designed\nfor high concurrency with mixed read-write workloads, I think waiting (as a\nprotocol) is fine. If waiting is a problem for some use case, at that point\nwe should just go all the way and replace the locking entirely. In fact, it\nmight be good to spell this out in the top-level comment and include a link\nto the second ART paper.\n\n> > [thinks some more...] Is there an API-level assumption that hasn't been\nspelled out? Would it help to have a parameter for whether the iteration\nfunction wants to reserve the privilege to perform writes? It could take\nthe appropriate lock at the start, and there could then be multiple\nread-only iterators, but only one read/write iterator. Note, I'm just\nguessing here, and I don't want to make things more difficult for future\nimprovements.\n>\n> Seems a good idea. Given the use case for parallel heap vacuum, it\n> would be a good idea to support having multiple read-only writers. The\n> iteration of the v22 is read-only, so if we want to support read-write\n> iterator, we would need to support a function that modifies the\n> current key-value returned by the iteration.\n\nOkay, so updating during iteration is not currently supported. It could in\nthe future, but I'd say that can also wait for fine-grained concurrency\nsupport. 
Intermediate-term, we should at least make it straightforward to\nsupport:\n\n1) parallel heap vacuum -> multiple read-only iterators\n2) parallel heap pruning -> multiple writers\n\nIt may or may not be worth it for someone to actually start either of those\nprojects, and there are other ways to improve vacuum that may be more\npressing. That said, it seems the tid store with global locking would\ncertainly work fine for #1 and maybe \"not too bad\" for #2. #2 can also\nmitigate waiting by using larger batching, or the leader process could\n\"pre-warm\" the tid store with zero-values using block numbers from the\nvisibility map.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Sun, Jan 29, 2023 at 9:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Sat, Jan 28, 2023 at 8:33 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> > The first implementation should be simple, easy to test/verify, easy to understand, and easy to replace. As much as possible anyway.>> Yes, but if a concurrent writer waits for another process to finish> the iteration, it ends up waiting on a lwlock, which is not> interruptible.>> >> > > So the idea is that we set iter_active to true (with the> > > lock in exclusive mode), and prevent concurrent updates when the flag> > > is true.> >> > ...by throwing elog(ERROR)? I'm not so sure users of this API would prefer that to waiting.>> Right. I think if we want to wait rather than an ERROR, the waiter> should wait in an interruptible way, for example, a condition> variable. I did a simpler way in the v22 patch.>> ...but looking at dshash.c, dshash_seq_next() seems to return an entry> while holding a lwlock on the partition. 
My assumption might be wrong.Using partitions there makes holding a lock less painful on average, I imagine, but I don't know the details there.If we make it clear that the first committed version is not (yet) designed for high concurrency with mixed read-write workloads, I think waiting (as a protocol) is fine. If waiting is a problem for some use case, at that point we should just go all the way and replace the locking entirely. In fact, it might be good to spell this out in the top-level comment and include a link to the second ART paper.> > [thinks some more...] Is there an API-level assumption that hasn't been spelled out? Would it help to have a parameter for whether the iteration function wants to reserve the privilege to perform writes? It could take the appropriate lock at the start, and there could then be multiple read-only iterators, but only one read/write iterator. Note, I'm just guessing here, and I don't want to make things more difficult for future improvements.>> Seems a good idea. Given the use case for parallel heap vacuum, it> would be a good idea to support having multiple read-only writers. The> iteration of the v22 is read-only, so if we want to support read-write> iterator, we would need to support a function that modifies the> current key-value returned by the iteration.Okay, so updating during iteration is not currently supported. It could in the future, but I'd say that can also wait for fine-grained concurrency support. Intermediate-term, we should at least make it straightforward to support: 1) parallel heap vacuum  -> multiple read-only iterators2) parallel heap pruning -> multiple writersIt may or may not be worth it for someone to actually start either of those projects, and there are other ways to improve vacuum that may be more pressing. That said, it seems the tid store with global locking would certainly work fine for #1 and maybe \"not too bad\" for #2. 
#2 can also mitigate waiting by using larger batching, or the leader process could \"pre-warm\" the tid store with zero-values using block numbers from the visibility map.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 30 Jan 2023 11:31:41 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 30, 2023 at 1:08 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n>\n> On Thu, Jan 26, 2023 at 12:39 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> > On Tue, Jan 24, 2023 at 1:17 PM Dilip Kumar <dilipbalaut@gmail.com> wrote:\n> > >\n> > > On Mon, Jan 23, 2023 at 6:00 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> > > >\n> > > > Attached is a rebase to fix conflicts from recent commits.\n> > >\n> > > I have reviewed v22-0022* patch and I have some comments.\n> > >\n> > > 1.\n> > > >It also changes to the column names max_dead_tuples and num_dead_tuples and to\n> > > >show the progress information in bytes.\n> > >\n> > > I think this statement needs to be rephrased.\n> >\n> > Could you be more specific?\n>\n> I mean the below statement in the commit message doesn't look\n> grammatically correct to me.\n>\n> \"It also changes to the column names max_dead_tuples and\n> num_dead_tuples and to show the progress information in bytes.\"\n>\n\nI've changed the commit message in the v23 patch. Please check it.\nOther comments are also incorporated in the v23 patch. 
Thank you for\nthe comments!\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 30 Jan 2023 16:12:54 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 30, 2023 at 1:31 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Sun, Jan 29, 2023 at 9:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Sat, Jan 28, 2023 at 8:33 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> > > The first implementation should be simple, easy to test/verify, easy to understand, and easy to replace. As much as possible anyway.\n> >\n> > Yes, but if a concurrent writer waits for another process to finish\n> > the iteration, it ends up waiting on a lwlock, which is not\n> > interruptible.\n> >\n> > >\n> > > > So the idea is that we set iter_active to true (with the\n> > > > lock in exclusive mode), and prevent concurrent updates when the flag\n> > > > is true.\n> > >\n> > > ...by throwing elog(ERROR)? I'm not so sure users of this API would prefer that to waiting.\n> >\n> > Right. I think if we want to wait rather than an ERROR, the waiter\n> > should wait in an interruptible way, for example, a condition\n> > variable. I did a simpler way in the v22 patch.\n> >\n> > ...but looking at dshash.c, dshash_seq_next() seems to return an entry\n> > while holding a lwlock on the partition. My assumption might be wrong.\n>\n> Using partitions there makes holding a lock less painful on average, I imagine, but I don't know the details there.\n>\n> If we make it clear that the first committed version is not (yet) designed for high concurrency with mixed read-write workloads, I think waiting (as a protocol) is fine. If waiting is a problem for some use case, at that point we should just go all the way and replace the locking entirely. 
In fact, it might be good to spell this out in the top-level comment and include a link to the second ART paper.\n\nAgreed. Will update the comments.\n\n>\n> > > [thinks some more...] Is there an API-level assumption that hasn't been spelled out? Would it help to have a parameter for whether the iteration function wants to reserve the privilege to perform writes? It could take the appropriate lock at the start, and there could then be multiple read-only iterators, but only one read/write iterator. Note, I'm just guessing here, and I don't want to make things more difficult for future improvements.\n> >\n> > Seems a good idea. Given the use case for parallel heap vacuum, it\n> > would be a good idea to support having multiple read-only writers. The\n> > iteration of the v22 is read-only, so if we want to support read-write\n> > iterator, we would need to support a function that modifies the\n> > current key-value returned by the iteration.\n>\n> Okay, so updating during iteration is not currently supported. It could in the future, but I'd say that can also wait for fine-grained concurrency support. Intermediate-term, we should at least make it straightforward to support:\n>\n> 1) parallel heap vacuum -> multiple read-only iterators\n> 2) parallel heap pruning -> multiple writers\n>\n> It may or may not be worth it for someone to actually start either of those projects, and there are other ways to improve vacuum that may be more pressing. That said, it seems the tid store with global locking would certainly work fine for #1 and maybe \"not too bad\" for #2. #2 can also mitigate waiting by using larger batching, or the leader process could \"pre-warm\" the tid store with zero-values using block numbers from the visibility map.\n\nTrue. 
Using a larger batching method seems to be worth testing when we\nimplement the parallel heap pruning.\n\nIn the next version patch, I'm going to update the locking support\npart and incorporate other comments I got.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 30 Jan 2023 23:30:01 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 30, 2023 at 11:30 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Jan 30, 2023 at 1:31 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Sun, Jan 29, 2023 at 9:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Sat, Jan 28, 2023 at 8:33 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> >\n> > > > The first implementation should be simple, easy to test/verify, easy to understand, and easy to replace. As much as possible anyway.\n> > >\n> > > Yes, but if a concurrent writer waits for another process to finish\n> > > the iteration, it ends up waiting on a lwlock, which is not\n> > > interruptible.\n> > >\n> > > >\n> > > > > So the idea is that we set iter_active to true (with the\n> > > > > lock in exclusive mode), and prevent concurrent updates when the flag\n> > > > > is true.\n> > > >\n> > > > ...by throwing elog(ERROR)? I'm not so sure users of this API would prefer that to waiting.\n> > >\n> > > Right. I think if we want to wait rather than an ERROR, the waiter\n> > > should wait in an interruptible way, for example, a condition\n> > > variable. I did a simpler way in the v22 patch.\n> > >\n> > > ...but looking at dshash.c, dshash_seq_next() seems to return an entry\n> > > while holding a lwlock on the partition. 
My assumption might be wrong.\n> >\n> > Using partitions there makes holding a lock less painful on average, I imagine, but I don't know the details there.\n> >\n> > If we make it clear that the first committed version is not (yet) designed for high concurrency with mixed read-write workloads, I think waiting (as a protocol) is fine. If waiting is a problem for some use case, at that point we should just go all the way and replace the locking entirely. In fact, it might be good to spell this out in the top-level comment and include a link to the second ART paper.\n>\n> Agreed. Will update the comments.\n>\n> >\n> > > > [thinks some more...] Is there an API-level assumption that hasn't been spelled out? Would it help to have a parameter for whether the iteration function wants to reserve the privilege to perform writes? It could take the appropriate lock at the start, and there could then be multiple read-only iterators, but only one read/write iterator. Note, I'm just guessing here, and I don't want to make things more difficult for future improvements.\n> > >\n> > > Seems a good idea. Given the use case for parallel heap vacuum, it\n> > > would be a good idea to support having multiple read-only writers. The\n> > > iteration of the v22 is read-only, so if we want to support read-write\n> > > iterator, we would need to support a function that modifies the\n> > > current key-value returned by the iteration.\n> >\n> > Okay, so updating during iteration is not currently supported. It could in the future, but I'd say that can also wait for fine-grained concurrency support. Intermediate-term, we should at least make it straightforward to support:\n> >\n> > 1) parallel heap vacuum -> multiple read-only iterators\n> > 2) parallel heap pruning -> multiple writers\n> >\n> > It may or may not be worth it for someone to actually start either of those projects, and there are other ways to improve vacuum that may be more pressing. 
That said, it seems the tid store with global locking would certainly work fine for #1 and maybe \"not too bad\" for #2. #2 can also mitigate waiting by using larger batching, or the leader process could \"pre-warm\" the tid store with zero-values using block numbers from the visibility map.\n>\n> True. Using a larger batching method seems to be worth testing when we\n> implement the parallel heap pruning.\n>\n> In the next version patch, I'm going to update the locking support\n> part and incorporate other comments I got.\n>\n\nI've attached v24 patches. The locking support patch is separated\n(0005 patch). Also I kept the updates for TidStore and the vacuum\nintegration from v23 separate.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 31 Jan 2023 23:42:32 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 31, 2023 at 9:43 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> I've attached v24 patches. The locking support patch is separated\n> (0005 patch). Also I kept the updates for TidStore and the vacuum\n> integration from v23 separate.\n\nOkay, that's a lot more simple, and closer to what I imagined. For v25, I\nsquashed v24's additions and added a couple of my own. I've kept the CF\nstatus at \"needs review\" because no specific action is required at the\nmoment.\n\nI did start to review the TID store some more, but that's on hold because\nsomething else came up: On a lark I decided to re-run some benchmarks to\nsee if anything got lost in converting to a template, and that led me down\na rabbit hole -- some good and bad news on that below.\n\n0001:\n\nI removed the uint64 case, as discussed. There is now a brief commit\nmessage, but needs to be fleshed out a bit. 
I took another look at the Arm\noptimization that Nathan found some month ago, for forming the highbit\nmask, but that doesn't play nicely with how node32 uses it, so I decided\nagainst it. I added a comment to describe the reasoning in case someone\nelse gets a similar idea.\n\nI briefly looked into \"separate-commit TODO: move non-SIMD fallbacks to\ntheir own header to clean up the #ifdef maze.\", but decided it wasn't such\na clear win to justify starting the work now. It's still in the back of my\nmind, but I removed the reminder from the commit message.\n\n0003:\n\nThe template now requires the value to be passed as a pointer. That was a\npretty trivial change, but affected multiple other patches, so not sent\nseparately. Also adds a forgotten RT_ prefix to the bitmap macros and adds\na top comment to the *_impl.h headers. There are some comment fixes. The\nchanges were either trivial or discussed earlier, so also not sent\nseparately.\n\n0004/5: I wanted to measure the load time as well as search time in\nbench_search_random_nodes(). That's kept separate to make it easier to test\nother patch versions.\n\nThe bad news is that the speed of loading TIDs in\nbench_seq/shuffle_search() has regressed noticeably. I can't reproduce this\nin any other bench function and was the reason for writing 0005 to begin\nwith. More confusingly, my efforts to fix this improved *other* functions,\nbut the former didn't budge at all. First the patches:\n\n0006 adds and removes some \"inline\" declarations (where it made sense), and\nadded some for \"pg_noinline\" based on Andres' advice some months ago.\n\n0007 removes some dead code. RT_NODE_INSERT_INNER is only called during\nRT_SET_EXTEND, so it can't possibly find an existing key. This kind of\nchange is much easier with the inner/node cases handled together in a\ntemplate, as far as being sure of how those cases are different. 
I thought\nabout trying the search in assert builds and verifying it doesn't exist,\nbut thought yet another #ifdef would be too messy.\n\nv25-addendum-try-no-maintain-order.txt -- It makes optional keeping the key\nchunks in order for the linear-search nodes. I believe the TID store no\nlonger cares about the ordering, but this is a text file for now because I\ndon't want to clutter the CI with a behavior change. Also, the second ART\npaper (on concurrency) mentioned that some locking schemes don't allow\nthese arrays to be shifted. So it might make sense to give up entirely on\nguaranteeing ordered iteration, or at least make it optional as in the\npatch.\n\nNow for some numbers:\n\n========================================\npsql -c \"select * from bench_search_random_nodes(10*1000*1000)\"\n(min load time of three)\n\nv15:\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 334182184 | 3352 | 2073\n\nv25-0005:\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 331987008 | 3426 | 2126\n\nv25-0006 (inlining or not):\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 331987008 | 3327 | 2035\n\nv25-0007 (remove dead code):\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 331987008 | 3313 | 2037\n\nv25-addendum...txt (no ordering):\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 331987008 | 2762 | 2042\n\nAllowing unordered inserts helps a lot here in loading. That's expected\nbecause there are a lot of inserts into the linear nodes. 
0006 might help a\nlittle.\n\n========================================\npsql -c \"select avg(load_ms) from generate_series(1,30) x(x), lateral\n(select * from bench_load_random_int(500 * 1000 * (1+x-x))) a\"\n\nv15:\n avg\n----------------------\n 207.3000000000000000\n\nv25-0005:\n avg\n----------------------\n 190.6000000000000000\n\nv25-0006 (inlining or not):\n avg\n----------------------\n 189.3333333333333333\n\nv25-0007 (remove dead code):\n avg\n----------------------\n 186.4666666666666667\n\nv25-addendum...txt (no ordering):\n avg\n----------------------\n 179.7000000000000000\n\nMost of the improvement from v15 to v25 probably comes from the change from\nnode4 to node3, and this test stresses that node the most. That shows in\nthe total memory used: it goes from 152MB to 132MB. Allowing unordered\ninserts helps some, the others are not convincing.\n\n========================================\npsql -c \"select rt_load_ms, rt_search_ms from bench_seq_search(0, 1 * 1000\n* 1000)\"\n(min load time of three)\n\nv15:\n rt_load_ms | rt_search_ms\n------------+--------------\n 113 | 455\n\nv25-0005:\n rt_load_ms | rt_search_ms\n------------+--------------\n 135 | 456\n\nv25-0006 (inlining or not):\n rt_load_ms | rt_search_ms\n------------+--------------\n 136 | 455\n\nv25-0007 (remove dead code):\n rt_load_ms | rt_search_ms\n------------+--------------\n 135 | 455\n\nv25-addendum...txt (no ordering):\n rt_load_ms | rt_search_ms\n------------+--------------\n 134 | 455\n\nNote: The regression seems to have started in v17, which is the first with\na full template.\n\nNothing so far has helped here, and previous experience has shown that\ntrying to profile 100ms will not be useful. Instead of putting more effort\ninto diving deeper, it seems a better use of time to write a benchmark that\ncalls the tid store itself. That's more realistic, since this function was\nintended to test load and search of tids, but the tid store doesn't quite\noperate so simply anymore. 
What do you think, Masahiko?\n\nI'm inclined to keep 0006, because it might give a slight boost, and 0007\nbecause it's never a bad idea to remove dead code.\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Tue, 7 Feb 2023 16:25:44 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Feb 7, 2023 at 4:25 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> [v25]\n\nThis conflicted with a commit from earlier today, so rebased in v26 with no\nfurther changes.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Tue, 7 Feb 2023 17:22:59 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn Tue, Feb 7, 2023 at 6:25 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Tue, Jan 31, 2023 at 9:43 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I've attached v24 patches. The locking support patch is separated\n> > (0005 patch). Also I kept the updates for TidStore and the vacuum\n> > integration from v23 separate.\n>\n> Okay, that's a lot more simple, and closer to what I imagined. For v25, I squashed v24's additions and added a couple of my own. I've kept the CF status at \"needs review\" because no specific action is required at the moment.\n>\n> I did start to review the TID store some more, but that's on hold because something else came up: On a lark I decided to re-run some benchmarks to see if anything got lost in converting to a template, and that led me down a rabbit hole -- some good and bad news on that below.\n>\n> 0001:\n>\n> I removed the uint64 case, as discussed. There is now a brief commit message, but needs to be fleshed out a bit. 
I took another look at the Arm optimization that Nathan found some month ago, for forming the highbit mask, but that doesn't play nicely with how node32 uses it, so I decided against it. I added a comment to describe the reasoning in case someone else gets a similar idea.\n>\n> I briefly looked into \"separate-commit TODO: move non-SIMD fallbacks to their own header to clean up the #ifdef maze.\", but decided it wasn't such a clear win to justify starting the work now. It's still in the back of my mind, but I removed the reminder from the commit message.\n\nThe changes make sense to me.\n\n>\n> 0003:\n>\n> The template now requires the value to be passed as a pointer. That was a pretty trivial change, but affected multiple other patches, so not sent separately. Also adds a forgotten RT_ prefix to the bitmap macros and adds a top comment to the *_impl.h headers. There are some comment fixes. The changes were either trivial or discussed earlier, so also not sent separately.\n\nGreat.\n\n>\n> 0004/5: I wanted to measure the load time as well as search time in bench_search_random_nodes(). That's kept separate to make it easier to test other patch versions.\n>\n> The bad news is that the speed of loading TIDs in bench_seq/shuffle_search() has regressed noticeably. I can't reproduce this in any other bench function and was the reason for writing 0005 to begin with. More confusingly, my efforts to fix this improved *other* functions, but the former didn't budge at all. First the patches:\n>\n> 0006 adds and removes some \"inline\" declarations (where it made sense), and added some for \"pg_noinline\" based on Andres' advice some months ago.\n\nAgreed.\n\n>\n> 0007 removes some dead code. RT_NODE_INSERT_INNER is only called during RT_SET_EXTEND, so it can't possibly find an existing key. This kind of change is much easier with the inner/node cases handled together in a template, as far as being sure of how those cases are different. 
I thought about trying the search in assert builds and verifying it doesn't exist, but thought yet another #ifdef would be too messy.\n\nAgreed.\n\n>\n> v25-addendum-try-no-maintain-order.txt -- It makes optional keeping the key chunks in order for the linear-search nodes. I believe the TID store no longer cares about the ordering, but this is a text file for now because I don't want to clutter the CI with a behavior change. Also, the second ART paper (on concurrency) mentioned that some locking schemes don't allow these arrays to be shifted. So it might make sense to give up entirely on guaranteeing ordered iteration, or at least make it optional as in the patch.\n\nI think it's still important for lazy vacuum that an iteration over a\nTID store returns TIDs in ascending order, because otherwise a heap\nvacuum does random writes. That being said, we can have\nRT_ITERATE_NEXT() return key-value pairs in an order regardless of how\nthe key chunks are stored in a node.\n\n> ========================================\n> psql -c \"select rt_load_ms, rt_search_ms from bench_seq_search(0, 1 * 1000 * 1000)\"\n> (min load time of three)\n>\n> v15:\n> rt_load_ms | rt_search_ms\n> ------------+--------------\n> 113 | 455\n>\n> v25-0005:\n> rt_load_ms | rt_search_ms\n> ------------+--------------\n> 135 | 456\n>\n> v25-0006 (inlining or not):\n> rt_load_ms | rt_search_ms\n> ------------+--------------\n> 136 | 455\n>\n> v25-0007 (remove dead code):\n> rt_load_ms | rt_search_ms\n> ------------+--------------\n> 135 | 455\n>\n> v25-addendum...txt (no ordering):\n> rt_load_ms | rt_search_ms\n> ------------+--------------\n> 134 | 455\n>\n> Note: The regression seems to have started in v17, which is the first with a full template.\n>\n> Nothing so far has helped here, and previous experience has shown that trying to profile 100ms will not be useful. Instead of putting more effort into diving deeper, it seems a better use of time to write a benchmark that calls the tid store itself. 
That's more realistic, since this function was intended to test load and search of tids, but the tid store doesn't quite operate so simply anymore. What do you think, Masahiko?\n\nYeah, that's more realistic. TidStore now encodes TIDs slightly\ndifferently from the benchmark test.\n\nI've attached the patch that adds a simple benchmark test using\nTidStore. With this test, I got similar trends of results to yours\nwith gcc, but I've not analyzed them in depth yet.\n\nquery: select * from bench_tidstore_load(0, 10 * 1000 * 1000)\n\nv15:\n load_ms\n---------\n 816\n\nv25-0007 (remove dead code):\nload_ms\n---------\n 839\n\nv25-addendum...txt (no ordering):\n load_ms\n---------\n 820\n\nBTW it would be better to remove the RT_DEBUG macro from bench_radix_tree.c.\n\n>\n> I'm inclined to keep 0006, because it might give a slight boost, and 0007 because it's never a bad idea to remove dead code.\n\nYeah, these two changes make sense to me too.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 9 Feb 2023 16:08:03 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Feb 9, 2023 at 2:08 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> I think it's still important for lazy vacuum that an iteration over a\n> TID store returns TIDs in ascending order, because otherwise a heap\n> vacuum does random writes. That being said, we can have\n> RT_ITERATE_NEXT() return key-value pairs in an order regardless of how\n> the key chunks are stored in a node.\n\nOkay, we can keep that possibility in mind if we need to go there.\n\n> > Note: The regression seems to have started in v17, which is the first\nwith a full template.\n\n> > 0007 removes some dead code. RT_NODE_INSERT_INNER is only called during\nRT_SET_EXTEND, so it can't possibly find an existing key. 
This kind of\nchange is much easier with the inner/node cases handled together in a\ntemplate, as far as being sure of how those cases are different. I thought\nabout trying the search in assert builds and verifying it doesn't exist,\nbut thought yet another #ifdef would be too messy.\n\nIt just occurred to me that these facts might be related. v17 was the first\nuse of the full template, and I decided then I liked one of your earlier\npatches where replace_node() calls node_update_inner() better than calling\nnode_insert_inner() with a NULL parent, which was a bit hard to understand.\nThat now-dead code was actually used in the latter case for updating the\n(original) parent. It's possible that trying to use separate paths\ncontributed to the regression. I'll try the other way and report back.\n\n> I've attached the patch that adds a simple benchmark test using\n> TidStore. With this test, I got similar trends of results to yours\n> with gcc, but I've not analyzed them in depth yet.\n\nThanks for that! I'll take a look.\n\n> BTW it would be better to remove the RT_DEBUG macro from\nbench_radix_tree.c.\n\nAbsolutely.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Feb 9, 2023 at 2:08 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> I think it's still important for lazy vacuum that an iteration over a> TID store returns TIDs in ascending order, because otherwise a heap> vacuum does random writes. That being said, we can have> RT_ITERATE_NEXT() return key-value pairs in an order regardless of how> the key chunks are stored in a node.Okay, we can keep that possibility in mind if we need to go there.> > Note: The regression seems to have started in v17, which is the first with a full template.> > 0007 removes some dead code. RT_NODE_INSERT_INNER is only called during RT_SET_EXTEND, so it can't possibly find an existing key. 
This kind of change is much easier with the inner/node cases handled together in a template, as far as being sure of how those cases are different. I thought about trying the search in assert builds and verifying it doesn't exist, but thought yet another #ifdef would be too messy.It just occurred to me that these facts might be related. v17 was the first use of the full template, and I decided then I liked one of your earlier patches where replace_node() calls node_update_inner() better than calling node_insert_inner() with a NULL parent, which was a bit hard to understand. That now-dead code was actually used in the latter case for updating the (original) parent. It's possible that trying to use separate paths contributed to the regression. I'll try the other way and report back.> I've attached the patch that adds a simple benchmark test using> TidStore. With this test, I got similar trends of results to yours> with gcc, but I've not analyzed them in depth yet.Thanks for that! I'll take a look.> BTW it would be better to remove the RT_DEBUG macro from bench_radix_tree.c.Absolutely.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 9 Feb 2023 19:56:27 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Feb 9, 2023 at 2:08 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> query: select * from bench_tidstore_load(0, 10 * 1000 * 1000)\n>\n> v15:\n> load_ms\n> ---------\n> 816\n\nHow did you build the tid store and test on v15? 
I first tried to\napply v15-0009-PoC-lazy-vacuum-integration.patch, which conflicts with\nvacuum now, so reset all that, but still getting build errors because the\ntid store types and functions have changed.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Feb 9, 2023 at 2:08 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> query: select * from bench_tidstore_load(0, 10 * 1000 * 1000)>> v15:>  load_ms> --------->      816How did you build the tid store and test on v15? I first tried to apply v15-0009-PoC-lazy-vacuum-integration.patch, which conflicts with vacuum now, so reset all that, but still getting build errors because the tid store types and functions have changed.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 10 Feb 2023 13:51:24 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Feb 10, 2023 at 3:51 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Thu, Feb 9, 2023 at 2:08 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > query: select * from bench_tidstore_load(0, 10 * 1000 * 1000)\n> >\n> > v15:\n> > load_ms\n> > ---------\n> > 816\n>\n> How did you build the tid store and test on v15? I first tried to apply v15-0009-PoC-lazy-vacuum-integration.patch, which conflicts with vacuum now, so reset all that, but still getting build errors because the tid store types and functions have changed.\n\nI applied v26-0008-Add-TIDStore-to-store-sets-of-TIDs-ItemPointerDa.patch\non top of v15 radix tree and changed the TidStore so that it uses v15\n(non-templated) radixtree. That way, we can test TidStore using v15\nradix tree. 
I've attached the patch that I applied on top of\nv26-0008-Add-TIDStore-to-store-sets-of-TIDs-ItemPointerDa.patch.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Fri, 10 Feb 2023 16:15:46 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I didn't get any closer to radix-tree regression, but I did find some\ninefficiencies in tidstore_add_tids() that are worth talking about first,\naddressed in a rough fashion in the attached .txt addendums that I can\nclean up and incorporate later.\n\nTo start, I can reproduce the regression with this test as well:\n\nselect * from bench_tidstore_load(0, 10 * 1000 * 1000);\n\nv15 + v26 store + adjustments:\n mem_allocated | load_ms\n---------------+---------\n 98202152 | 1676\n\nv26 0001-0008\n mem_allocated | load_ms\n---------------+---------\n 98202032 | 1826\n\n...and reverting to the alternate way to update the parent didn't help:\n\nv26 0001-6, 0008, insert_inner w/ null parent\n\n mem_allocated | load_ms\n---------------+---------\n 98202032 | 1825\n\n...and I'm kind of glad that wasn't the problem, because going back to that\nwould be a pain for the shmem case.\n\nRunning perf doesn't show anything much different in the proportions (note\nthat rt_set must have been inlined when declared locally in v26):\n\nv15 + v26 store + adjustments:\n 65.88% postgres postgres [.] tidstore_add_tids\n 10.74% postgres postgres [.] rt_set\n 9.20% postgres postgres [.] palloc0\n 6.49% postgres postgres [.] rt_node_insert_leaf\n\nv26 0001-0008\n 78.50% postgres postgres [.] tidstore_add_tids\n 8.88% postgres postgres [.] palloc0\n 6.24% postgres postgres [.] local_rt_node_insert_leaf\n\nv2699-0001: The first thing I noticed is that palloc0 is taking way more\ntime than it should, and it's because the compiler doesn't know the\nvalues[] array is small. 
One reason we need to zero the array is to make\nthe algorithm agnostic about what order the offsets come in, as I requested\nin a previous review. Thinking some more, I was way too paranoid about\nthat. As long as access methods scan the line pointer array in the usual\nway, maybe we can just assert that the keys we create are in order, and\nzero any unused array entries as we find them. (I admit I can't actually\nthink of a reason we would ever encounter offsets out of order.) Also, we\ncan keep track of the last key we need to consider for insertion into the\nradix tree, and ignore the rest. That might shave a few cycles during the\nexclusive lock when the max offset of an LP_DEAD item < 64 on a given page,\nwhich I think would be common in the wild. I also got rid of the special\ncase for non-encoding, since shifting by zero should work the same way.\nThese together led to a nice speedup on the v26 branch:\n\n mem_allocated | load_ms\n---------------+---------\n 98202032 | 1386\n\nv2699-0002: The next thing I noticed is forming a full ItemIdPointer to\npass to tid_to_key_off(). That's bad for tidstore_add_tids() because\nItemPointerSetBlockNumber() must do this in order to allow the struct to be\nSHORTALIGN'd:\n\nstatic inline void\nBlockIdSet(BlockIdData *blockId, BlockNumber blockNumber)\n{\nblockId->bi_hi = blockNumber >> 16;\nblockId->bi_lo = blockNumber & 0xffff;\n}\n\nThen, tid_to_key_off() calls ItemPointerGetBlockNumber(), which must\nreverse the above process:\n\nstatic inline BlockNumber\nBlockIdGetBlockNumber(const BlockIdData *blockId)\n{\nreturn (((BlockNumber) blockId->bi_hi) << 16) | ((BlockNumber)\nblockId->bi_lo);\n}\n\nThere is no reason to do any of this if we're not reading/writing directly\nto/from an on-disk tid etc. To avoid this, I created a new function\nencode_key_off() [name could be better], which deals with the raw block\nnumber that we already have. 
Then turn tid_to_key_off() into a wrapper\naround that, since we still need the full conversion for\ntidstore_lookup_tid().\n\nv2699-0003: Get rid of all the remaining special cases for encoding/or not.\nI am unaware of the need to optimize that case or treat it in any way\ndifferently. I haven't tested this on an installation with non-default\nblocksize and didn't measure this separately, but 0002+0003 gives:\n\n mem_allocated | load_ms\n---------------+---------\n 98202032 | 1259\n\nIf these are acceptable, I can incorporate them into a later patchset. In\nany case, speeding up tidstore_add_tids() will make any regressions in the\nbacking radix tree more obvious. I will take a look at that next week.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Sat, 11 Feb 2023 12:33:42 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sat, Feb 11, 2023 at 2:33 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> I didn't get any closer to radix-tree regression,\n\nMe neither. It seems that in v26, inserting chunks into node-32 is\nslow but needs more analysis. 
I'll share if I found something\ninteresting.\n\n> but I did find some inefficiencies in tidstore_add_tids() that are worth talking about first, addressed in a rough fashion in the attached .txt addendums that I can clean up and incorporate later.\n>\n> To start, I can reproduce the regression with this test as well:\n>\n> select * from bench_tidstore_load(0, 10 * 1000 * 1000);\n>\n> v15 + v26 store + adjustments:\n> mem_allocated | load_ms\n> ---------------+---------\n> 98202152 | 1676\n>\n> v26 0001-0008\n> mem_allocated | load_ms\n> ---------------+---------\n> 98202032 | 1826\n>\n> ...and reverting to the alternate way to update the parent didn't help:\n>\n> v26 0001-6, 0008, insert_inner w/ null parent\n>\n> mem_allocated | load_ms\n> ---------------+---------\n> 98202032 | 1825\n>\n> ...and I'm kind of glad that wasn't the problem, because going back to that would be a pain for the shmem case.\n>\n> Running perf doesn't show anything much different in the proportions (note that rt_set must have been inlined when declared locally in v26):\n>\n> v15 + v26 store + adjustments:\n> 65.88% postgres postgres [.] tidstore_add_tids\n> 10.74% postgres postgres [.] rt_set\n> 9.20% postgres postgres [.] palloc0\n> 6.49% postgres postgres [.] rt_node_insert_leaf\n>\n> v26 0001-0008\n> 78.50% postgres postgres [.] tidstore_add_tids\n> 8.88% postgres postgres [.] palloc0\n> 6.24% postgres postgres [.] local_rt_node_insert_leaf\n>\n> v2699-0001: The first thing I noticed is that palloc0 is taking way more time than it should, and it's because the compiler doesn't know the values[] array is small. One reason we need to zero the array is to make the algorithm agnostic about what order the offsets come in, as I requested in a previous review. Thinking some more, I was way too paranoid about that. 
As long as access methods scan the line pointer array in the usual way, maybe we can just assert that the keys we create are in order, and zero any unused array entries as we find them. (I admit I can't actually think of a reason we would ever encounter offsets out of order.)\n\nI can think that something like traversing a HOT chain could visit\noffsets out of order. But fortunately we prune such collected TIDs\nbefore heap vacuum in heap case.\n\n> Also, we can keep track of the last key we need to consider for insertion into the radix tree, and ignore the rest. That might shave a few cycles during the exclusive lock when the max offset of an LP_DEAD item < 64 on a given page, which I think would be common in the wild. I also got rid of the special case for non-encoding, since shifting by zero should work the same way. These together led to a nice speedup on the v26 branch:\n>\n> mem_allocated | load_ms\n> ---------------+---------\n> 98202032 | 1386\n>\n> v2699-0002: The next thing I noticed is forming a full ItemIdPointer to pass to tid_to_key_off(). That's bad for tidstore_add_tids() because ItemPointerSetBlockNumber() must do this in order to allow the struct to be SHORTALIGN'd:\n>\n> static inline void\n> BlockIdSet(BlockIdData *blockId, BlockNumber blockNumber)\n> {\n> blockId->bi_hi = blockNumber >> 16;\n> blockId->bi_lo = blockNumber & 0xffff;\n> }\n>\n> Then, tid_to_key_off() calls ItemPointerGetBlockNumber(), which must reverse the above process:\n>\n> static inline BlockNumber\n> BlockIdGetBlockNumber(const BlockIdData *blockId)\n> {\n> return (((BlockNumber) blockId->bi_hi) << 16) | ((BlockNumber) blockId->bi_lo);\n> }\n>\n> There is no reason to do any of this if we're not reading/writing directly to/from an on-disk tid etc. To avoid this, I created a new function encode_key_off() [name could be better], which deals with the raw block number that we already have. 
Then turn tid_to_key_off() into a wrapper around that, since we still need the full conversion for tidstore_lookup_tid().\n>\n> v2699-0003: Get rid of all the remaining special cases for encoding/or not. I am unaware of the need to optimize that case or treat it in any way differently. I haven't tested this on an installation with non-default blocksize and didn't measure this separately, but 0002+0003 gives:\n>\n> mem_allocated | load_ms\n> ---------------+---------\n> 98202032 | 1259\n>\n> If these are acceptable, I can incorporate them into a later patchset.\n\nThese are nice improvements! I agree with all changes.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 13 Feb 2023 16:50:57 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Feb 13, 2023 at 2:51 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Sat, Feb 11, 2023 at 2:33 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > I didn't get any closer to radix-tree regression,\n>\n> Me neither. It seems that in v26, inserting chunks into node-32 is\n> slow but needs more analysis. I'll share if I found something\n> interesting.\n\nIf that were the case, then the other benchmarks I ran would likely have\nslowed down as well, but they are the same or faster. There is one\nmicrobenchmark I didn't run before: \"select * from\nbench_fixed_height_search(15)\" (15 to reduce noise from growing size class,\nand despite the name it measures load time as well). Trying this now shows\nno difference: a few runs range 19 to 21ms in each version. 
That also\nreinforces that update_inner is fine and that the move to value pointer API\ndidn't regress.\n\nChanging TIDS_PER_BLOCK_FOR_LOAD to 1 to stress the tree more gives (min of\n5, perf run separate from measurements):\n\nv15 + v26 store:\n\n mem_allocated | load_ms\n---------------+---------\n 98202152 | 553\n\n 19.71% postgres postgres [.] tidstore_add_tids\n+ 31.47% postgres postgres [.] rt_set\n= 51.18%\n\n 20.62% postgres postgres [.] rt_node_insert_leaf\n 6.05% postgres postgres [.] AllocSetAlloc\n 4.74% postgres postgres [.] AllocSetFree\n 4.62% postgres postgres [.] palloc\n 2.23% postgres postgres [.] SlabAlloc\n\nv26:\n\n mem_allocated | load_ms\n---------------+---------\n 98202032 | 617\n\n 57.45% postgres postgres [.] tidstore_add_tids\n\n 20.67% postgres postgres [.] local_rt_node_insert_leaf\n 5.99% postgres postgres [.] AllocSetAlloc\n 3.55% postgres postgres [.] palloc\n 3.05% postgres postgres [.] AllocSetFree\n 2.05% postgres postgres [.] SlabAlloc\n\nSo it seems the store itself got faster when we removed shared memory paths\nfrom the v26 store to test it against v15.\n\nI thought to favor the local memory case in the tidstore by controlling\ninlining -- it's smaller and will be called much more often, so I tried the\nfollowing (done in 0007)\n\n #define RT_PREFIX shared_rt\n #define RT_SHMEM\n-#define RT_SCOPE static\n+#define RT_SCOPE static pg_noinline\n\nThat brings it down to\n\n mem_allocated | load_ms\n---------------+---------\n 98202032 | 590\n\nThat's better, but not still not within noise level. Perhaps some slowdown\nis unavoidable, but it would be nice to understand why.\n\n> I can think that something like traversing a HOT chain could visit\n> offsets out of order. 
But fortunately we prune such collected TIDs\n> before heap vacuum in heap case.\n\nFurther, currently we *already* assume we populate the tid array in order\n(for binary search), so we can just continue assuming that (with an assert\nadded since it's more public in this form). I'm not sure why such basic\ncommon sense evaded me a few versions ago...\n\n> > If these are acceptable, I can incorporate them into a later patchset.\n>\n> These are nice improvements! I agree with all changes.\n\nGreat, I've squashed these into the tidstore patch (0004). Also added 0005,\nwhich is just a simplification.\n\nI squashed the earlier dead code removal into the radix tree patch.\n\nv27-0008 measures tid store iteration performance and adds a stub function\nto prevent spurious warnings, so the benchmarking module can always be\nbuilt.\n\nGetting the list of offsets from the old array for a given block is always\ntrivial, but tidstore_iter_extract_tids() is doing a huge amount of\nunnecessary work when TIDS_PER_BLOCK_FOR_LOAD is 1, enough to exceed the\nload time:\n\n mem_allocated | load_ms | iter_ms\n---------------+---------+---------\n 98202032 | 589 | 915\n\nFortunately, it's an easy fix, done in 0009.\n\n mem_allocated | load_ms | iter_ms\n---------------+---------+---------\n 98202032 | 589 | 153\n\nI'll soon resume more cosmetic review of the tid store, but this is enough\nto post.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Tue, 14 Feb 2023 18:24:38 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "The benchmark module shouldn't have been un-commented-out, so attached a\nrevert of that.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Tue, 14 Feb 2023 19:36:51 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage 
for lazy vacuum" }, { "msg_contents": "On Tue, Feb 14, 2023 at 8:24 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Feb 13, 2023 at 2:51 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Sat, Feb 11, 2023 at 2:33 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > I didn't get any closer to radix-tree regression,\n> >\n> > Me neither. It seems that in v26, inserting chunks into node-32 is\n> > slow but needs more analysis. I'll share if I found something\n> > interesting.\n>\n> If that were the case, then the other benchmarks I ran would likely have slowed down as well, but they are the same or faster. There is one microbenchmark I didn't run before: \"select * from bench_fixed_height_search(15)\" (15 to reduce noise from growing size class, and despite the name it measures load time as well). Trying this now shows no difference: a few runs range 19 to 21ms in each version. That also reinforces that update_inner is fine and that the move to value pointer API didn't regress.\n>\n> Changing TIDS_PER_BLOCK_FOR_LOAD to 1 to stress the tree more gives (min of 5, perf run separate from measurements):\n>\n> v15 + v26 store:\n>\n> mem_allocated | load_ms\n> ---------------+---------\n> 98202152 | 553\n>\n> 19.71% postgres postgres [.] tidstore_add_tids\n> + 31.47% postgres postgres [.] rt_set\n> = 51.18%\n>\n> 20.62% postgres postgres [.] rt_node_insert_leaf\n> 6.05% postgres postgres [.] AllocSetAlloc\n> 4.74% postgres postgres [.] AllocSetFree\n> 4.62% postgres postgres [.] palloc\n> 2.23% postgres postgres [.] SlabAlloc\n>\n> v26:\n>\n> mem_allocated | load_ms\n> ---------------+---------\n> 98202032 | 617\n>\n> 57.45% postgres postgres [.] tidstore_add_tids\n>\n> 20.67% postgres postgres [.] local_rt_node_insert_leaf\n> 5.99% postgres postgres [.] AllocSetAlloc\n> 3.55% postgres postgres [.] palloc\n> 3.05% postgres postgres [.] AllocSetFree\n> 2.05% postgres postgres [.] 
SlabAlloc\n>\n> So it seems the store itself got faster when we removed shared memory paths from the v26 store to test it against v15.\n>\n> I thought to favor the local memory case in the tidstore by controlling inlining -- it's smaller and will be called much more often, so I tried the following (done in 0007)\n>\n> #define RT_PREFIX shared_rt\n> #define RT_SHMEM\n> -#define RT_SCOPE static\n> +#define RT_SCOPE static pg_noinline\n>\n> That brings it down to\n>\n> mem_allocated | load_ms\n> ---------------+---------\n> 98202032 | 590\n\nThe improvement makes sense to me. I've also done the same test (with\nchanging TIDS_PER_BLOCK_FOR_LOAD to 1):\n\nw/o 0007 patch:\n mem_allocated | load_ms | iter_ms\n---------------+---------+---------\n 98202032 | 334 | 445\n(1 row)\n\nw/ 0007 patch:\n mem_allocated | load_ms | iter_ms\n---------------+---------+---------\n 98202032 | 316 | 434\n(1 row)\n\nOn the other hand, with TIDS_PER_BLOCK_FOR_LOAD being 30, the load\nperformance didn't improve:\n\nw/0 0007 patch:\n mem_allocated | load_ms | iter_ms\n---------------+---------+---------\n 98202032 | 601 | 608\n(1 row)\n\nw/ 0007 patch:\n mem_allocated | load_ms | iter_ms\n---------------+---------+---------\n 98202032 | 610 | 606\n(1 row)\n\nThat being said, it might be within noise level, so I agree with 0007 patch.\n\n> Perhaps some slowdown is unavoidable, but it would be nice to understand why.\n\nTrue.\n\n>\n> > I can think that something like traversing a HOT chain could visit\n> > offsets out of order. But fortunately we prune such collected TIDs\n> > before heap vacuum in heap case.\n>\n> Further, currently we *already* assume we populate the tid array in order (for binary search), so we can just continue assuming that (with an assert added since it's more public in this form). I'm not sure why such basic common sense evaded me a few versions ago...\n\nRight. 
TidStore is implemented not only for heap, so loading\nout-of-order TIDs might be important in the future.\n\n> > > If these are acceptable, I can incorporate them into a later patchset.\n> >\n> > These are nice improvements! I agree with all changes.\n>\n> Great, I've squashed these into the tidstore patch (0004). Also added 0005, which is just a simplification.\n>\n\nI've attached some small patches to improve the radix tree and tidstrore:\n\nWe have the following WIP comment in test_radixtree:\n\n// WIP: compiles with warnings because rt_attach is defined but not used\n// #define RT_SHMEM\n\nHow about unsetting RT_SCOPE to suppress warnings for unused rt_attach\nand friends?\n\nFYI I've briefly tested the TidStore with blocksize = 32kb, and it\nseems to work fine.\n\n> I squashed the earlier dead code removal into the radix tree patch.\n\nThanks!\n\n>\n> v27-0008 measures tid store iteration performance and adds a stub function to prevent spurious warnings, so the benchmarking module can always be built.\n>\n> Getting the list of offsets from the old array for a given block is always trivial, but tidstore_iter_extract_tids() is doing a huge amount of unnecessary work when TIDS_PER_BLOCK_FOR_LOAD is 1, enough to exceed the load time:\n>\n> mem_allocated | load_ms | iter_ms\n> ---------------+---------+---------\n> 98202032 | 589 | 915\n>\n> Fortunately, it's an easy fix, done in 0009.\n>\n> mem_allocated | load_ms | iter_ms\n> ---------------+---------+---------\n> 98202032 | 589 | 153\n\nCool!\n\n>\n> I'll soon resume more cosmetic review of the tid store, but this is enough to post.\n\nThanks!\n\nYou removed the vacuum integration patch from v27, is there any reason for that?\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 16 Feb 2023 12:24:16 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { 
"msg_contents": "On Thu, Feb 16, 2023 at 10:24 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Tue, Feb 14, 2023 at 8:24 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> > > I can think that something like traversing a HOT chain could visit\n> > > offsets out of order. But fortunately we prune such collected TIDs\n> > > before heap vacuum in heap case.\n> >\n> > Further, currently we *already* assume we populate the tid array in\norder (for binary search), so we can just continue assuming that (with an\nassert added since it's more public in this form). I'm not sure why such\nbasic common sense evaded me a few versions ago...\n>\n> Right. TidStore is implemented not only for heap, so loading\n> out-of-order TIDs might be important in the future.\n\nThat's what I was probably thinking about some weeks ago, but I'm having a\nhard time imagining how it would come up, even for something like the\nconveyor-belt concept.\n\n> We have the following WIP comment in test_radixtree:\n>\n> // WIP: compiles with warnings because rt_attach is defined but not used\n> // #define RT_SHMEM\n>\n> How about unsetting RT_SCOPE to suppress warnings for unused rt_attach\n> and friends?\n\nSounds good to me, and the other fixes make sense as well.\n\n> FYI I've briefly tested the TidStore with blocksize = 32kb, and it\n> seems to work fine.\n\nThat was on my list, so great! How about the other end -- nominally we\nallow 512b. (In practice it won't matter, but this would make sure I didn't\nmess anything up when forcing all MaxTuplesPerPage to encode.)\n\n> You removed the vacuum integration patch from v27, is there any reason\nfor that?\n\nJust an oversight.\n\nNow for some general comments on the tid store...\n\n+ * TODO: The caller must be certain that no other backend will attempt to\n+ * access the TidStore before calling this function. Other backend must\n+ * explicitly call tidstore_detach to free up backend-local memory\nassociated\n+ * with the TidStore. 
The backend that calls tidstore_destroy must not call\n+ * tidstore_detach.\n+ */\n+void\n+tidstore_destroy(TidStore *ts)\n\nDo we need to do anything for this todo?\n\nIt might help readability to have a concept of \"off_upper/off_lower\", just\nso we can describe things more clearly. The key is block + off_upper, and\nthe value is a bitmap of all the off_lower bits. I hinted at that in my\naddition of encode_key_off(). Along those lines, maybe\ns/TIDSTORE_OFFSET_MASK/TIDSTORE_OFFSET_LOWER_MASK/. Actually, I'm not even\nsure the TIDSTORE_ prefix is valuable for these local macros.\n\nThe word \"value\" as a variable name is pretty generic in this context, and\nit might be better to call it the off_lower_bitmap, at least in some\nplaces. The \"key\" doesn't have a good short term for naming, but in\ncomments we should make sure we're clear it's \"block# + off_upper\".\n\nI'm not a fan of the name \"tid_i\", even as a temp variable -- maybe\n\"compressed_tid\"?\n\nmaybe s/tid_to_key_off/encode_tid/ and s/encode_key_off/encode_block_offset/\n\nIt might be worth using typedefs for key and value type. Actually, since\nkey type is fixed for the foreseeable future, maybe the radix tree template\nshould define a key typedef?\n\nThe term \"result\" is probably fine within the tidstore, but as a public\nname used by vacuum, it's not very descriptive. I don't have a good idea,\nthough.\n\nSome files in backend/access use CamelCase for public functions, although\nit's not consistent. I think doing that for tidstore would help\nreadability, since they would stand out from rt_* functions and vacuum\nfunctions. It's a matter of taste, though.\n\nI don't understand the control flow in tidstore_iterate_next(), or when\nBlockNumberIsValid() is true. 
If this is the best way to code this, it\nneeds more commentary.\n\n\nSome comments on vacuum:\n\nI think we'd better get some real-world testing of this, fairly soon.\n\nI had an idea: If it's not too much effort, it might be worth splitting it\ninto two parts: one that just adds the store (not caring about its memory\nlimits or progress reporting etc). During index scan, check both the new\nstore and the array and log a warning (we don't want to exit or crash,\nbetter to try to investigate while live if possible) if the result doesn't\nmatch. Then perhaps set up an instance and let something like TPC-C run for\na few days. The second patch would just restore the rest of the current\npatch. That would help reassure us it's working as designed. Soon I plan to\ndo some measurements with vacuuming large tables to get some concrete\nnumbers that the community can get excited about.\n\nWe also want to verify that progress reporting works as designed and has no\nweird corner cases.\n\n * autovacuum_work_mem) memory space to keep track of dead TIDs. We\ninitially\n...\n+ * create a TidStore with the maximum bytes that can be used by the\nTidStore.\n\nThis kind of implies that we allocate the maximum bytes upfront. I think\nthis sentence can be removed. We already mentioned in the previous\nparagraph that we set an upper bound.\n\n- (errmsg(\"table \\\"%s\\\": removed %lld dead item identifiers in %u pages\",\n- vacrel->relname, (long long) index, vacuumed_pages)));\n+ (errmsg(\"table \\\"%s\\\": removed \" UINT64_FORMAT \"dead item identifiers in\n%u pages\",\n+ vacrel->relname, tidstore_num_tids(vacrel->dead_items),\n+ vacuumed_pages)));\n\nI don't think the format string has to change, since num_tids was changed\nback to int64 in an earlier patch version?\n\n- * the memory space for storing dead items allocated in the DSM segment.\nWe\n[a lot of whitespace adjustment]\n+ * the shared TidStore. 
We launch parallel worker processes at the start of\n\nThe old comment still seems mostly ok? Maybe just s/DSM segment/DSA area/\nor something else minor.\n\n- /* Estimate size for dead_items -- PARALLEL_VACUUM_KEY_DEAD_ITEMS */\n- est_dead_items_len = vac_max_items_to_alloc_size(max_items);\n- shm_toc_estimate_chunk(&pcxt->estimator, est_dead_items_len);\n+ /* Estimate size for dead tuple DSA -- PARALLEL_VACUUM_KEY_DSA */\n+ shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);\n\nIf we're starting from the minimum, \"estimate\" doesn't really describe it\nanymore? Maybe \"Initial size\"?\nWhat does dsa_minimum_size() work out to in practice? 1MB?\nAlso, I think PARALLEL_VACUUM_KEY_DSA is left over from an earlier patch.\n\n\nLastly, on the radix tree:\n\nI find extend, set, and set_extend hard to keep straight when studying the\ncode. Maybe EXTEND -> EXTEND_UP , SET_EXTEND -> EXTEND_DOWN ?\n\nRT_ITER_UPDATE_KEY is unused, but I somehow didn't notice when turning it\ninto a template.\n\n+ /*\n+ * Set the node to the node iterator and update the iterator stack\n+ * from this node.\n+ */\n+ RT_UPDATE_ITER_STACK(iter, child, level - 1);\n\n+/*\n+ * Update each node_iter for inner nodes in the iterator node stack.\n+ */\n+static void\n+RT_UPDATE_ITER_STACK(RT_ITER *iter, RT_PTR_LOCAL from_node, int from)\n\nThese comments don't really help readers unfamiliar with the code. The\niteration coding in general needs clearer description.\n\nIn the test:\n\n+ 4, /* RT_NODE_KIND_4 */\n\nThe small size was changed to 3 -- if this test needs to know the max size\nfor each kind (class?), I wonder why it didn't fail. Should it? Maybe we\nneed symbols for the various fanouts.\n\nI also want to mention now that we better decide soon if we want to support\nshrinking of nodes for v16, even if the tidstore never shrinks. We'll need\nto do it at some point, but I'm not sure if doing it now would make more\nwork for future changes targeting highly concurrent workloads. 
If so, doing\nit now would just be wasted work. On the other hand, someone might have a\nuse that needs deletion before someone else needs concurrency. Just in\ncase, I have a start of node-shrinking logic, but needs some work because\nwe need the (local pointer) parent to update to the new smaller node, just\nlike the growing case.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Feb 16, 2023 at 10:24 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Tue, Feb 14, 2023 at 8:24 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> > > I can think that something like traversing a HOT chain could visit> > > offsets out of order. But fortunately we prune such collected TIDs> > > before heap vacuum in heap case.> >> > Further, currently we *already* assume we populate the tid array in order (for binary search), so we can just continue assuming that (with an assert added since it's more public in this form). I'm not sure why such basic common sense evaded me a few versions ago...>> Right. TidStore is implemented not only for heap, so loading> out-of-order TIDs might be important in the future.That's what I was probably thinking about some weeks ago, but I'm having a hard time imagining how it would come up, even for something like the conveyor-belt concept.> We have the following WIP comment in test_radixtree:>> // WIP: compiles with warnings because rt_attach is defined but not used> // #define RT_SHMEM>> How about unsetting RT_SCOPE to suppress warnings for unused rt_attach> and friends?Sounds good to me, and the other fixes make sense as well.> FYI I've briefly tested the TidStore with blocksize = 32kb, and it> seems to work fine.That was on my list, so great! How about the other end -- nominally we allow 512b. 
(In practice it won't matter, but this would make sure I didn't mess anything up when forcing all MaxTuplesPerPage to encode.)> You removed the vacuum integration patch from v27, is there any reason for that?Just an oversight.Now for some general comments on the tid store...+ * TODO: The caller must be certain that no other backend will attempt to+ * access the TidStore before calling this function. Other backend must+ * explicitly call tidstore_detach to free up backend-local memory associated+ * with the TidStore. The backend that calls tidstore_destroy must not call+ * tidstore_detach.+ */+void+tidstore_destroy(TidStore *ts)Do we need to do anything for this todo?It might help readability to have a concept of \"off_upper/off_lower\", just so we can describe things more clearly. The key is block + off_upper, and the value is a bitmap of all the off_lower bits. I hinted at that in my addition of encode_key_off(). Along those lines, maybe s/TIDSTORE_OFFSET_MASK/TIDSTORE_OFFSET_LOWER_MASK/. Actually, I'm not even sure the TIDSTORE_ prefix is valuable for these local macros.The word \"value\" as a variable name is pretty generic in this context, and it might be better to call it the off_lower_bitmap, at least in some places. The \"key\" doesn't have a good short term for naming, but in comments we should make sure we're clear it's \"block# + off_upper\".I'm not a fan of the name \"tid_i\", even as a temp variable -- maybe \"compressed_tid\"?maybe s/tid_to_key_off/encode_tid/ and s/encode_key_off/encode_block_offset/It might be worth using typedefs for key and value type. Actually, since key type is fixed for the foreseeable future, maybe the radix tree template should define a key typedef?The term \"result\" is probably fine within the tidstore, but as a public name used by vacuum, it's not very descriptive. I don't have a good idea, though.Some files in backend/access use CamelCase for public functions, although it's not consistent. 
I think doing that for tidstore would help readability, since they would stand out from rt_* functions and vacuum functions. It's a matter of taste, though.I don't understand the control flow in tidstore_iterate_next(), or when BlockNumberIsValid() is true. If this is the best way to code this, it needs more commentary.Some comments on vacuum:I think we'd better get some real-world testing of this, fairly soon.I had an idea: If it's not too much effort, it might be worth splitting it into two parts: one that just adds the store (not caring about its memory limits or progress reporting etc). During index scan, check both the new store and the array and log a warning (we don't want to exit or crash, better to try to investigate while live if possible) if the result doesn't match. Then perhaps set up an instance and let something like TPC-C run for a few days. The second patch would just restore the rest of the current patch. That would help reassure us it's working as designed. Soon I plan to do some measurements with vacuuming large tables to get some concrete numbers that the community can get excited about.We also want to verify that progress reporting works as designed and has no weird corner cases.  * autovacuum_work_mem) memory space to keep track of dead TIDs.  We initially...+ * create a TidStore with the maximum bytes that can be used by the TidStore.This kind of implies that we allocate the maximum bytes upfront. I think this sentence can be removed. 
We already mentioned in the previous paragraph that we set an upper bound.-\t\t\t(errmsg(\"table \\\"%s\\\": removed %lld dead item identifiers in %u pages\",-\t\t\t\t\tvacrel->relname, (long long) index, vacuumed_pages)));+\t\t\t(errmsg(\"table \\\"%s\\\": removed \" UINT64_FORMAT \"dead item identifiers in %u pages\",+\t\t\t\t\tvacrel->relname, tidstore_num_tids(vacrel->dead_items),+\t\t\t\t\tvacuumed_pages)));I don't think the format string has to change, since num_tids was changed back to int64 in an earlier patch version?- * the memory space for storing dead items allocated in the DSM segment.  We[a lot of whitespace adjustment]+ * the shared TidStore. We launch parallel worker processes at the start ofThe old comment still seems mostly ok? Maybe just s/DSM segment/DSA area/ or something else minor.-\t/* Estimate size for dead_items -- PARALLEL_VACUUM_KEY_DEAD_ITEMS */-\test_dead_items_len = vac_max_items_to_alloc_size(max_items);-\tshm_toc_estimate_chunk(&pcxt->estimator, est_dead_items_len);+\t/* Estimate size for dead tuple DSA -- PARALLEL_VACUUM_KEY_DSA */+\tshm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);If we're starting from the minimum, \"estimate\" doesn't really describe it anymore? Maybe \"Initial size\"?What does dsa_minimum_size() work out to in practice? 1MB?Also, I think PARALLEL_VACUUM_KEY_DSA is left over from an earlier patch.Lastly, on the radix tree:I find extend, set, and set_extend hard to keep straight when studying the code. 
Maybe EXTEND -> EXTEND_UP , SET_EXTEND -> EXTEND_DOWN ?RT_ITER_UPDATE_KEY is unused, but I somehow didn't notice when turning it into a template.+\t\t/*+\t\t * Set the node to the node iterator and update the iterator stack+\t\t * from this node.+\t\t */+\t\tRT_UPDATE_ITER_STACK(iter, child, level - 1);+/*+ * Update each node_iter for inner nodes in the iterator node stack.+ */+static void+RT_UPDATE_ITER_STACK(RT_ITER *iter, RT_PTR_LOCAL from_node, int from)These comments don't really help readers unfamiliar with the code. The iteration coding in general needs clearer description.In the test:+\t4,\t\t\t\t\t\t\t/* RT_NODE_KIND_4 */The small size was changed to 3 -- if this test needs to know the max size for each kind (class?), I wonder why it didn't fail. Should it? Maybe we need symbols for the various fanouts.I also want to mention now that we better decide soon if we want to support shrinking of nodes for v16, even if the tidstore never shrinks. We'll need to do it at some point, but I'm not sure if doing it now would make more work for future changes targeting highly concurrent workloads. If so, doing it now would just be wasted work. On the other hand, someone might have a use that needs deletion before someone else needs concurrency. Just in case, I have a start of node-shrinking logic, but needs some work because we need the (local pointer) parent to update to the new smaller node, just like the growing case.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 16 Feb 2023 16:22:56 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2023-02-16 16:22:56 +0700, John Naylor wrote:\n> On Thu, Feb 16, 2023 at 10:24 AM Masahiko Sawada <sawada.mshk@gmail.com>\n> > Right. 
TidStore is implemented not only for heap, so loading\n> > out-of-order TIDs might be important in the future.\n> \n> That's what I was probably thinking about some weeks ago, but I'm having a\n> hard time imagining how it would come up, even for something like the\n> conveyor-belt concept.\n\nWe really ought to replace the tid bitmap used for bitmap heap scans. The\nhashtable we use is a pretty awful data structure for it. And that's not\nfilled in-order, for example.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Thu, 16 Feb 2023 08:44:08 -0800", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Feb 16, 2023 at 11:44 PM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2023-02-16 16:22:56 +0700, John Naylor wrote:\n> > On Thu, Feb 16, 2023 at 10:24 AM Masahiko Sawada <sawada.mshk@gmail.com>\n> > > Right. TidStore is implemented not only for heap, so loading\n> > > out-of-order TIDs might be important in the future.\n> >\n> > That's what I was probably thinking about some weeks ago, but I'm\nhaving a\n> > hard time imagining how it would come up, even for something like the\n> > conveyor-belt concept.\n>\n> We really ought to replace the tid bitmap used for bitmap heap scans. The\n> hashtable we use is a pretty awful data structure for it. And that's not\n> filled in-order, for example.\n\nI took a brief look at that and agree we should sometime make it work there\nas well.\n\nv26 tidstore_add_tids() appears to assume that it's only called once per\nblocknumber. While the order of offsets doesn't matter there for a single\nblock, calling it again with the same block would wipe out the earlier\noffsets, IIUC. 
To do an actual \"add tid\" where the order doesn't matter, it\nseems we would need to (acquire lock if needed), read the current bitmap\nand OR in the new bit if it exists, then write it back out.\n\nThat sounds slow, so it might still be good for vacuum to call a function\nthat passes a block and an array of offsets that are assumed ordered (as in\nv28), but with a more accurate name, like tidstore_set_block_offsets().\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Feb 16, 2023 at 11:44 PM Andres Freund <andres@anarazel.de> wrote:>> Hi,>> On 2023-02-16 16:22:56 +0700, John Naylor wrote:> > On Thu, Feb 16, 2023 at 10:24 AM Masahiko Sawada <sawada.mshk@gmail.com>> > > Right. TidStore is implemented not only for heap, so loading> > > out-of-order TIDs might be important in the future.> >> > That's what I was probably thinking about some weeks ago, but I'm having a> > hard time imagining how it would come up, even for something like the> > conveyor-belt concept.>> We really ought to replace the tid bitmap used for bitmap heap scans. The> hashtable we use is a pretty awful data structure for it. And that's not> filled in-order, for example.I took a brief look at that and agree we should sometime make it work there as well.v26 tidstore_add_tids() appears to assume that it's only called once per blocknumber. While the order of offsets doesn't matter there for a single block, calling it again with the same block would wipe out the earlier offsets, IIUC. 
To do an actual \"add tid\" where the order doesn't matter, it seems we would need to (acquire lock if needed), read the current bitmap and OR in the new bit if it exists, then write it back out.That sounds slow, so it might still be good for vacuum to call a function that passes a block and an array of offsets that are assumed ordered (as in v28), but with a more accurate name, like tidstore_set_block_offsets().--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 17 Feb 2023 15:00:24 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Feb 16, 2023 at 6:23 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Feb 16, 2023 at 10:24 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Tue, Feb 14, 2023 at 8:24 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> > > > I can think that something like traversing a HOT chain could visit\n> > > > offsets out of order. But fortunately we prune such collected TIDs\n> > > > before heap vacuum in heap case.\n> > >\n> > > Further, currently we *already* assume we populate the tid array in order (for binary search), so we can just continue assuming that (with an assert added since it's more public in this form). I'm not sure why such basic common sense evaded me a few versions ago...\n> >\n> > Right. 
TidStore is implemented not only for heap, so loading\n> > out-of-order TIDs might be important in the future.\n>\n> That's what I was probably thinking about some weeks ago, but I'm having a hard time imagining how it would come up, even for something like the conveyor-belt concept.\n>\n> > We have the following WIP comment in test_radixtree:\n> >\n> > // WIP: compiles with warnings because rt_attach is defined but not used\n> > // #define RT_SHMEM\n> >\n> > How about unsetting RT_SCOPE to suppress warnings for unused rt_attach\n> > and friends?\n>\n> Sounds good to me, and the other fixes make sense as well.\n\nThanks, I merged them.\n\n>\n> > FYI I've briefly tested the TidStore with blocksize = 32kb, and it\n> > seems to work fine.\n>\n> That was on my list, so great! How about the other end -- nominally we allow 512b. (In practice it won't matter, but this would make sure I didn't mess anything up when forcing all MaxTuplesPerPage to encode.)\n\nAccording to the doc, the minimum block size is 1kB. It seems to work\nfine with 1kB blocks.\n\n>\n> > You removed the vacuum integration patch from v27, is there any reason for that?\n>\n> Just an oversight.\n>\n> Now for some general comments on the tid store...\n>\n> + * TODO: The caller must be certain that no other backend will attempt to\n> + * access the TidStore before calling this function. Other backend must\n> + * explicitly call tidstore_detach to free up backend-local memory associated\n> + * with the TidStore. The backend that calls tidstore_destroy must not call\n> + * tidstore_detach.\n> + */\n> +void\n> +tidstore_destroy(TidStore *ts)\n>\n> Do we need to do anything for this todo?\n\nSince it's practically no problem, I think we can live with it for\nnow. dshash also has the same todo.\n\n>\n> It might help readability to have a concept of \"off_upper/off_lower\", just so we can describe things more clearly. The key is block + off_upper, and the value is a bitmap of all the off_lower bits. 
I hinted at that in my addition of encode_key_off(). Along those lines, maybe s/TIDSTORE_OFFSET_MASK/TIDSTORE_OFFSET_LOWER_MASK/. Actually, I'm not even sure the TIDSTORE_ prefix is valuable for these local macros.\n>\n> The word \"value\" as a variable name is pretty generic in this context, and it might be better to call it the off_lower_bitmap, at least in some places. The \"key\" doesn't have a good short term for naming, but in comments we should make sure we're clear it's \"block# + off_upper\".\n>\n> I'm not a fan of the name \"tid_i\", even as a temp variable -- maybe \"compressed_tid\"?\n>\n> maybe s/tid_to_key_off/encode_tid/ and s/encode_key_off/encode_block_offset/\n>\n> It might be worth using typedefs for key and value type. Actually, since key type is fixed for the foreseeable future, maybe the radix tree template should define a key typedef?\n>\n> The term \"result\" is probably fine within the tidstore, but as a public name used by vacuum, it's not very descriptive. I don't have a good idea, though.\n>\n> Some files in backend/access use CamelCase for public functions, although it's not consistent. I think doing that for tidstore would help readability, since they would stand out from rt_* functions and vacuum functions. It's a matter of taste, though.\n>\n> I don't understand the control flow in tidstore_iterate_next(), or when BlockNumberIsValid() is true. If this is the best way to code this, it needs more commentary.\n\nThe attached 0008 patch addressed all above comments on tidstore.\n\n> Some comments on vacuum:\n>\n> I think we'd better get some real-world testing of this, fairly soon.\n>\n> I had an idea: If it's not too much effort, it might be worth splitting it into two parts: one that just adds the store (not caring about its memory limits or progress reporting etc). 
During index scan, check both the new store and the array and log a warning (we don't want to exit or crash, better to try to investigate while live if possible) if the result doesn't match. Then perhaps set up an instance and let something like TPC-C run for a few days. The second patch would just restore the rest of the current patch. That would help reassure us it's working as designed.\n\nYeah, I did a similar thing in an earlier version of tidstore patch.\nSince we're trying to introduce two new components: radix tree and\ntidstore, I sometimes find it hard to investigate failures happening\nduring lazy (parallel) vacuum due to a bug either in tidstore or radix\ntree. If there is a bug in lazy vacuum, we cannot even do initdb. So\nit might be a good idea to do such checks in USE_ASSERT_CHECKING (or\nwith another macro say DEBUG_TIDSTORE) builds. For example, TidStore\nstores tids to both the radix tree and array, and checks if the\nresults match when lookup or iteration. It will use more memory but it\nwould not be a big problem in USE_ASSERT_CHECKING builds. It would\nalso be great if we can enable such checks on some bf animals.\n\n> Soon I plan to do some measurements with vacuuming large tables to get some concrete numbers that the community can get excited about.\n\nThanks!\n\n>\n> We also want to verify that progress reporting works as designed and has no weird corner cases.\n>\n> * autovacuum_work_mem) memory space to keep track of dead TIDs. We initially\n> ...\n> + * create a TidStore with the maximum bytes that can be used by the TidStore.\n>\n> This kind of implies that we allocate the maximum bytes upfront. I think this sentence can be removed. 
We already mentioned in the previous paragraph that we set an upper bound.\n\nAgreed.\n\n>\n> - (errmsg(\"table \\\"%s\\\": removed %lld dead item identifiers in %u pages\",\n> - vacrel->relname, (long long) index, vacuumed_pages)));\n> + (errmsg(\"table \\\"%s\\\": removed \" UINT64_FORMAT \"dead item identifiers in %u pages\",\n> + vacrel->relname, tidstore_num_tids(vacrel->dead_items),\n> + vacuumed_pages)));\n>\n> I don't think the format string has to change, since num_tids was changed back to int64 in an earlier patch version?\n\nI think we need to change the format to INT64_FORMAT.\n\n>\n> - * the memory space for storing dead items allocated in the DSM segment. We\n> [a lot of whitespace adjustment]\n> + * the shared TidStore. We launch parallel worker processes at the start of\n>\n> The old comment still seems mostly ok? Maybe just s/DSM segment/DSA area/ or something else minor.\n>\n> - /* Estimate size for dead_items -- PARALLEL_VACUUM_KEY_DEAD_ITEMS */\n> - est_dead_items_len = vac_max_items_to_alloc_size(max_items);\n> - shm_toc_estimate_chunk(&pcxt->estimator, est_dead_items_len);\n> + /* Estimate size for dead tuple DSA -- PARALLEL_VACUUM_KEY_DSA */\n> + shm_toc_estimate_chunk(&pcxt->estimator, dsa_minsize);\n>\n> If we're starting from the minimum, \"estimate\" doesn't really describe it anymore? Maybe \"Initial size\"?\n> What does dsa_minimum_size() work out to in practice? 1MB?\n> Also, I think PARALLEL_VACUUM_KEY_DSA is left over from an earlier patch.\n>\n\nRight. The attached 0009 patch addressed comments on vacuum\nintegration except for the correctness checking.\n\n\n> Lastly, on the radix tree:\n>\n> I find extend, set, and set_extend hard to keep straight when studying the code. Maybe EXTEND -> EXTEND_UP , SET_EXTEND -> EXTEND_DOWN ?\n>\n> RT_ITER_UPDATE_KEY is unused, but I somehow didn't notice when turning it into a template.\n\nIt was used in radixtree_iter_impl.h. 
But I removed it as it was not necessary.\n\n>\n> + /*\n> + * Set the node to the node iterator and update the iterator stack\n> + * from this node.\n> + */\n> + RT_UPDATE_ITER_STACK(iter, child, level - 1);\n>\n> +/*\n> + * Update each node_iter for inner nodes in the iterator node stack.\n> + */\n> +static void\n> +RT_UPDATE_ITER_STACK(RT_ITER *iter, RT_PTR_LOCAL from_node, int from)\n>\n> These comments don't really help readers unfamiliar with the code. The iteration coding in general needs clearer description.\n>\n\nI agree with all of the above comments. The attached 0007 patch\naddressed comments on the radix tree.\n\n> In the test:\n>\n> + 4, /* RT_NODE_KIND_4 */\n>\n> The small size was changed to 3 -- if this test needs to know the max size for each kind (class?), I wonder why it didn't fail. Should it? Maybe we need symbols for the various fanouts.\n>\n\nSince this information is used to the number of keys inserted, it\ndoesn't check the node kind. So we just didn't test node-3. It might\nbe better to expose and use both RT_SIZE_CLASS and RT_SIZE_CLASS_INFO.\n\n> I also want to mention now that we better decide soon if we want to support shrinking of nodes for v16, even if the tidstore never shrinks. We'll need to do it at some point, but I'm not sure if doing it now would make more work for future changes targeting highly concurrent workloads. If so, doing it now would just be wasted work. On the other hand, someone might have a use that needs deletion before someone else needs concurrency. Just in case, I have a start of node-shrinking logic, but needs some work because we need the (local pointer) parent to update to the new smaller node, just like the growing case.\n\nThanks, that's also on my todo list. TBH I'm not sure we should\nimprove the deletion at this stage as there is no use case of deletion\nin the core. 
I'd prefer to focus on improving the quality of the\ncurrent radix tree and tidstore now, and I think we can support\nnode-shrinking once we are confident with the current implementation.\n\nOn Fri, Feb 17, 2023 at 5:00 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>That sounds slow, so it might still be good for vacuum to call a function that passes a block and an array of offsets that are assumed ordered (as in v28), but with a more accurate name, like tidstore_set_block_offsets().\n\ntidstore_set_block_offsets() sounds better. I used\nTidStoreSetBlockOffsets() in the latest patch set.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 20 Feb 2023 14:56:15 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Feb 20, 2023 at 2:56 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Feb 16, 2023 at 6:23 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Thu, Feb 16, 2023 at 10:24 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Tue, Feb 14, 2023 at 8:24 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> >\n> > > > > I can think that something like traversing a HOT chain could visit\n> > > > > offsets out of order. But fortunately we prune such collected TIDs\n> > > > > before heap vacuum in heap case.\n> > > >\n> > > > Further, currently we *already* assume we populate the tid array in order (for binary search), so we can just continue assuming that (with an assert added since it's more public in this form). I'm not sure why such basic common sense evaded me a few versions ago...\n> > >\n> > > Right. 
TidStore is implemented not only for heap, so loading\n> > > out-of-order TIDs might be important in the future.\n> >\n> > That's what I was probably thinking about some weeks ago, but I'm having a hard time imagining how it would come up, even for something like the conveyor-belt concept.\n> >\n> > > We have the following WIP comment in test_radixtree:\n> > >\n> > > // WIP: compiles with warnings because rt_attach is defined but not used\n> > > // #define RT_SHMEM\n> > >\n> > > How about unsetting RT_SCOPE to suppress warnings for unused rt_attach\n> > > and friends?\n> >\n> > Sounds good to me, and the other fixes make sense as well.\n>\n> Thanks, I merged them.\n>\n> >\n> > > FYI I've briefly tested the TidStore with blocksize = 32kb, and it\n> > > seems to work fine.\n> >\n> > That was on my list, so great! How about the other end -- nominally we allow 512b. (In practice it won't matter, but this would make sure I didn't mess anything up when forcing all MaxTuplesPerPage to encode.)\n>\n> According to the doc, the minimum block size is 1kB. It seems to work\n> fine with 1kB blocks.\n>\n> >\n> > > You removed the vacuum integration patch from v27, is there any reason for that?\n> >\n> > Just an oversight.\n> >\n> > Now for some general comments on the tid store...\n> >\n> > + * TODO: The caller must be certain that no other backend will attempt to\n> > + * access the TidStore before calling this function. Other backend must\n> > + * explicitly call tidstore_detach to free up backend-local memory associated\n> > + * with the TidStore. The backend that calls tidstore_destroy must not call\n> > + * tidstore_detach.\n> > + */\n> > +void\n> > +tidstore_destroy(TidStore *ts)\n> >\n> > Do we need to do anything for this todo?\n>\n> Since it's practically no problem, I think we can live with it for\n> now. dshash also has the same todo.\n>\n> >\n> > It might help readability to have a concept of \"off_upper/off_lower\", just so we can describe things more clearly. 
The key is block + off_upper, and the value is a bitmap of all the off_lower bits. I hinted at that in my addition of encode_key_off(). Along those lines, maybe s/TIDSTORE_OFFSET_MASK/TIDSTORE_OFFSET_LOWER_MASK/. Actually, I'm not even sure the TIDSTORE_ prefix is valuable for these local macros.\n> >\n> > The word \"value\" as a variable name is pretty generic in this context, and it might be better to call it the off_lower_bitmap, at least in some places. The \"key\" doesn't have a good short term for naming, but in comments we should make sure we're clear it's \"block# + off_upper\".\n> >\n> > I'm not a fan of the name \"tid_i\", even as a temp variable -- maybe \"compressed_tid\"?\n> >\n> > maybe s/tid_to_key_off/encode_tid/ and s/encode_key_off/encode_block_offset/\n> >\n> > It might be worth using typedefs for key and value type. Actually, since key type is fixed for the foreseeable future, maybe the radix tree template should define a key typedef?\n> >\n> > The term \"result\" is probably fine within the tidstore, but as a public name used by vacuum, it's not very descriptive. I don't have a good idea, though.\n> >\n> > Some files in backend/access use CamelCase for public functions, although it's not consistent. I think doing that for tidstore would help readability, since they would stand out from rt_* functions and vacuum functions. It's a matter of taste, though.\n> >\n> > I don't understand the control flow in tidstore_iterate_next(), or when BlockNumberIsValid() is true. If this is the best way to code this, it needs more commentary.\n>\n> The attached 0008 patch addressed all above comments on tidstore.\n>\n> > Some comments on vacuum:\n> >\n> > I think we'd better get some real-world testing of this, fairly soon.\n> >\n> > I had an idea: If it's not too much effort, it might be worth splitting it into two parts: one that just adds the store (not caring about its memory limits or progress reporting etc). 
During index scan, check both the new store and the array and log a warning (we don't want to exit or crash, better to try to investigate while live if possible) if the result doesn't match. Then perhaps set up an instance and let something like TPC-C run for a few days. The second patch would just restore the rest of the current patch. That would help reassure us it's working as designed.\n>\n> Yeah, I did a similar thing in an earlier version of tidstore patch.\n> Since we're trying to introduce two new components: radix tree and\n> tidstore, I sometimes find it hard to investigate failures happening\n> during lazy (parallel) vacuum due to a bug either in tidstore or radix\n> tree. If there is a bug in lazy vacuum, we cannot even do initdb. So\n> it might be a good idea to do such checks in USE_ASSERT_CHECKING (or\n> with another macro say DEBUG_TIDSTORE) builds. For example, TidStore\n> stores tids to both the radix tree and array, and checks if the\n> results match when lookup or iteration. It will use more memory but it\n> would not be a big problem in USE_ASSERT_CHECKING builds. It would\n> also be great if we can enable such checks on some bf animals.\n\nI've tried this idea. Enabling this check on all debug builds (i.e.,\nwith USE_ASSERT_CHECKING macro) seems not a good idea so I use a\nspecial macro for that, TIDSTORE_DEBUG. 
I think we can define this\nmacro on some bf animals (or possibly a new bf animal).\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Wed, 22 Feb 2023 15:15:30 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Feb 22, 2023 at 1:16 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Mon, Feb 20, 2023 at 2:56 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> >\n> > Yeah, I did a similar thing in an earlier version of tidstore patch.\n\nOkay, if you had checks against the old array lookup in development, that\ngives us better confidence.\n\n> > Since we're trying to introduce two new components: radix tree and\n> > tidstore, I sometimes find it hard to investigate failures happening\n> > during lazy (parallel) vacuum due to a bug either in tidstore or radix\n> > tree. If there is a bug in lazy vacuum, we cannot even do initdb. So\n> > it might be a good idea to do such checks in USE_ASSERT_CHECKING (or\n> > with another macro say DEBUG_TIDSTORE) builds. For example, TidStore\n> > stores tids to both the radix tree and array, and checks if the\n> > results match when lookup or iteration. It will use more memory but it\n> > would not be a big problem in USE_ASSERT_CHECKING builds. It would\n> > also be great if we can enable such checks on some bf animals.\n>\n> I've tried this idea. Enabling this check on all debug builds (i.e.,\n> with USE_ASSERT_CHECKING macro) seems not a good idea so I use a\n> special macro for that, TIDSTORE_DEBUG. I think we can define this\n> macro on some bf animals (or possibly a new bf animal).\n\n I don't think any vacuum calls in regression tests would stress any of\nthis code very much, so it's not worth carrying the old way forward. 
I was\nthinking of only doing this as a short-time sanity check for testing a\nreal-world workload.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Feb 22, 2023 at 1:16 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Mon, Feb 20, 2023 at 2:56 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> >> > Yeah, I did a similar thing in an earlier version of tidstore patch.Okay, if you had checks against the old array lookup in development, that gives us better confidence. > > Since we're trying to introduce two new components: radix tree and> > tidstore, I sometimes find it hard to investigate failures happening> > during lazy (parallel) vacuum due to a bug either in tidstore or radix> > tree. If there is a bug in lazy vacuum, we cannot even do initdb. So> > it might be a good idea to do such checks in USE_ASSERT_CHECKING (or> > with another macro say DEBUG_TIDSTORE) builds. For example, TidStore> > stores tids to both the radix tree and array, and checks if the> > results match when lookup or iteration. It will use more memory but it> > would not be a big problem in USE_ASSERT_CHECKING builds. It would> > also be great if we can enable such checks on some bf animals.>> I've tried this idea. Enabling this check on all debug builds (i.e.,> with USE_ASSERT_CHECKING macro) seems not a good idea so I use a> special macro for that, TIDSTORE_DEBUG. I think we can define this> macro on some bf animals (or possibly a new bf animal). I don't think any vacuum calls in regression tests would stress any of this code very much, so it's not worth carrying the old way forward. 
I was thinking of only doing this as a short-time sanity check for testing a real-world workload.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 22 Feb 2023 14:35:00 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Feb 22, 2023 at 4:35 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Wed, Feb 22, 2023 at 1:16 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Feb 20, 2023 at 2:56 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > Yeah, I did a similar thing in an earlier version of tidstore patch.\n>\n> Okay, if you had checks against the old array lookup in development, that gives us better confidence.\n>\n> > > Since we're trying to introduce two new components: radix tree and\n> > > tidstore, I sometimes find it hard to investigate failures happening\n> > > during lazy (parallel) vacuum due to a bug either in tidstore or radix\n> > > tree. If there is a bug in lazy vacuum, we cannot even do initdb. So\n> > > it might be a good idea to do such checks in USE_ASSERT_CHECKING (or\n> > > with another macro say DEBUG_TIDSTORE) builds. For example, TidStore\n> > > stores tids to both the radix tree and array, and checks if the\n> > > results match when lookup or iteration. It will use more memory but it\n> > > would not be a big problem in USE_ASSERT_CHECKING builds. It would\n> > > also be great if we can enable such checks on some bf animals.\n> >\n> > I've tried this idea. Enabling this check on all debug builds (i.e.,\n> > with USE_ASSERT_CHECKING macro) seems not a good idea so I use a\n> > special macro for that, TIDSTORE_DEBUG. I think we can define this\n> > macro on some bf animals (or possibly a new bf animal).\n>\n> I don't think any vacuum calls in regression tests would stress any of this code very much, so it's not worth carrying the old way forward. 
I was thinking of only doing this as a short-time sanity check for testing a real-world workload.\n\nI guess that It would also be helpful at least until the GA release.\nPeople will be able to test them easily on their workloads or their\ncustom test scenarios.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 22 Feb 2023 17:29:23 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Feb 22, 2023 at 3:29 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Wed, Feb 22, 2023 at 4:35 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > I don't think any vacuum calls in regression tests would stress any of\nthis code very much, so it's not worth carrying the old way forward. I was\nthinking of only doing this as a short-time sanity check for testing a\nreal-world workload.\n>\n> I guess that It would also be helpful at least until the GA release.\n> People will be able to test them easily on their workloads or their\n> custom test scenarios.\n\nThat doesn't seem useful to me. If we've done enough testing to reassure us\nthe new way always gives the same answer, the old way is not needed at\ncommit time. If there is any doubt it will always give the same answer,\nthen the whole patchset won't be committed.\n\nTPC-C was just an example. It should have testing comparing the old and new\nmethods. If you have already done that to some degree, that might be\nenough. 
After performance tests, I'll also try some vacuums that use the\ncomparison patch.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Feb 22, 2023 at 3:29 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Wed, Feb 22, 2023 at 4:35 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> >> >  I don't think any vacuum calls in regression tests would stress any of this code very much, so it's not worth carrying the old way forward. I was thinking of only doing this as a short-time sanity check for testing a real-world workload.>> I guess that It would also be helpful at least until the GA release.> People will be able to test them easily on their workloads or their> custom test scenarios.That doesn't seem useful to me. If we've done enough testing to reassure us the new way always gives the same answer, the old way is not needed at commit time. If there is any doubt it will always give the same answer, then the whole patchset won't be committed.TPC-C was just an example. It should have testing comparing the old and new methods. If you have already done that to some degree, that might be enough. 
After performance tests, I'll also try some vacuums that use the comparison patch.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 22 Feb 2023 16:55:30 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I ran a couple \"in situ\" tests on server hardware using UUID columns, since\nthey are common in the real world and have bad correlation to heap\norder, so are a challenge for index vacuum.\n\n=== test 1, delete everything from a small table, with very small\nmaintenance_work_mem:\n\nalter system set shared_buffers ='4GB';\nalter system set max_wal_size ='10GB';\nalter system set checkpoint_timeout ='30 min';\nalter system set autovacuum =off;\n\n-- unrealistically low\nalter system set maintenance_work_mem = '32MB';\n\ncreate table if not exists test (x uuid);\ntruncate table test;\ninsert into test (x) select gen_random_uuid() from\ngenerate_series(1,50*1000*1000);\ncreate index on test (x);\n\ndelete from test;\nvacuum (verbose, truncate off) test;\n--\n\nmaster:\nINFO: finished vacuuming \"john.naylor.public.test\": index scans: 9\nsystem usage: CPU: user: 70.04 s, system: 19.85 s, elapsed: 802.06 s\n\nv29 patch:\nINFO: finished vacuuming \"john.naylor.public.test\": index scans: 1\nsystem usage: CPU: user: 9.80 s, system: 2.62 s, elapsed: 36.68 s\n\nThis is a bit artificial, but it's easy to construct cases where the array\nleads to multiple index scans but the new tid store can fit everythin\nwithout breaking a sweat. I didn't save the progress reporting, but v29 was\nusing about 11MB for tid storage.\n\n\n=== test 2: try to stress tid lookup with production maintenance_work_mem:\n1. use unlogged table to reduce noise\n2. vacuum freeze first to reduce heap scan time\n3. 
delete some records at the beginning and end of heap to defeat binary\nsearch's pre-check\n\nalter system set shared_buffers ='4GB';\nalter system set max_wal_size ='10GB';\nalter system set checkpoint_timeout ='30 min';\nalter system set autovacuum =off;\n\nalter system set maintenance_work_mem = '1GB';\n\ncreate unlogged table if not exists test (x uuid);\ntruncate table test;\ninsert into test (x) select gen_random_uuid() from\ngenerate_series(1,1000*1000*1000);\nvacuum_freeze test;\n\nselect pg_size_pretty(pg_table_size('test'));\n pg_size_pretty\n----------------\n 41 GB\n\ncreate index on test (x);\n\nselect pg_size_pretty(pg_total_relation_size('test'));\n pg_size_pretty\n----------------\n 71 GB\n\nselect max(ctid) from test;\n max\n--------------\n (5405405,75)\n\ndelete from test where ctid < '(100000,0)'::tid;\ndelete from test where ctid > '(5300000,0)'::tid;\n\nvacuum (verbose, truncate off) test;\n\nboth:\nINFO: vacuuming \"john.naylor.public.test\"\nINFO: finished vacuuming \"john.naylor.public.test\": index scans: 1\nindex scan needed: 205406 pages from table (3.80% of total) had 38000000\ndead item identifiers removed\n\n--\nmaster:\nsystem usage: CPU: user: 134.32 s, system: 19.24 s, elapsed: 286.14 s\n\nv29 patch:\nsystem usage: CPU: user: 97.71 s, system: 45.78 s, elapsed: 573.94 s\n\nThe entire vacuum took 25% less wall clock time. Reminder that this is\nwithout wal logging, and also unscientific because only one run.\n\n--\nI took 10 seconds of perf data while index vacuuming was going on (showing\ncalls > 2%):\n\nmaster:\n 40.59% postgres postgres [.] vac_cmp_itemptr\n 24.97% postgres libc-2.17.so [.] bsearch\n 6.67% postgres postgres [.] btvacuumpage\n 4.61% postgres [kernel.kallsyms] [k] copy_user_enhanced_fast_string\n 3.48% postgres postgres [.] PageIndexMultiDelete\n 2.67% postgres postgres [.] vac_tid_reaped\n 2.03% postgres postgres [.] compactify_tuples\n 2.01% postgres libc-2.17.so [.] 
__memcpy_ssse3_back\n\nv29 patch:\n\n 29.22% postgres postgres [.] TidStoreIsMember\n 9.30% postgres postgres [.] btvacuumpage\n 7.76% postgres postgres [.] PageIndexMultiDelete\n 6.31% postgres [kernel.kallsyms] [k] copy_user_enhanced_fast_string\n 5.60% postgres postgres [.] compactify_tuples\n 4.26% postgres libc-2.17.so [.] __memcpy_ssse3_back\n 4.12% postgres postgres [.] hash_search_with_hash_value\n\n--\nmaster:\npsql -c \"select phase, heap_blks_total, heap_blks_scanned, max_dead_tuples,\nnum_dead_tuples from pg_stat_progress_vacuum\"\n phase | heap_blks_total | heap_blks_scanned | max_dead_tuples\n| num_dead_tuples\n-------------------+-----------------+-------------------+-----------------+-----------------\n vacuuming indexes | 5405406 | 5405406 | 178956969\n| 38000000\n\nv29 patch:\npsql -c \"select phase, heap_blks_total, heap_blks_scanned,\nmax_dead_tuple_bytes, dead_tuple_bytes from pg_stat_progress_vacuum\"\n phase | heap_blks_total | heap_blks_scanned |\nmax_dead_tuple_bytes | dead_tuple_bytes\n-------------------+-----------------+-------------------+----------------------+------------------\n vacuuming indexes | 5405406 | 5405406 |\n1073670144 | 8678064\n\nHere, the old array pessimistically needs 1GB allocated (as for any table >\n~5GB), but only fills 228MB for tid lookup. The patch reports 8.7MB. Tables\nthat only fit, say, 30-50 tuples per page will have less extreme\ndifferences in memory use. Same for the case where only a couple dead items\noccur per page, with many uninteresting pages in between. 
Even so, the\nallocation will be much more accurately sized in the patch, especially in\nnon-parallel vacuum.\n\nThere are other cases that could be tested (I mentioned some above), but\nthis is enough to show the improvements possible.\n\nI still need to do some cosmetic follow-up to v29 as well as a status\nreport, and I will try to get back to that soon.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nI ran a couple \"in situ\" tests on server hardware using UUID columns, since they are common in the real world and have bad correlation to heap order, so are a challenge for index vacuum.=== test 1, delete everything from a small table, with very small maintenance_work_mem:alter system set shared_buffers ='4GB';alter system set max_wal_size ='10GB';alter system set checkpoint_timeout ='30 min';alter system set autovacuum =off;-- unrealistically lowalter system set maintenance_work_mem = '32MB';create table if not exists test (x uuid);truncate table test;insert into test (x) select gen_random_uuid() from generate_series(1,50*1000*1000);create index on test (x);delete from test;vacuum (verbose, truncate off) test;--master:INFO:  finished vacuuming \"john.naylor.public.test\": index scans: 9system usage: CPU: user: 70.04 s, system: 19.85 s, elapsed: 802.06 sv29 patch:INFO:  finished vacuuming \"john.naylor.public.test\": index scans: 1system usage: CPU: user: 9.80 s, system: 2.62 s, elapsed: 36.68 sThis is a bit artificial, but it's easy to construct cases where the array leads to multiple index scans but the new tid store can fit everythin without breaking a sweat. I didn't save the progress reporting, but v29 was using about 11MB for tid storage.=== test 2: try to stress tid lookup with production maintenance_work_mem:1. use unlogged table to reduce noise2. vacuum freeze first to reduce heap scan time3. 
delete some records at the beginning and end of heap to defeat binary search's pre-checkalter system set shared_buffers ='4GB';alter system set max_wal_size ='10GB';alter system set checkpoint_timeout ='30 min';alter system set autovacuum =off;alter system set maintenance_work_mem = '1GB';create unlogged table if not exists test (x uuid);truncate table test;insert into test (x) select gen_random_uuid() from generate_series(1,1000*1000*1000);vacuum_freeze test;select pg_size_pretty(pg_table_size('test')); pg_size_pretty ---------------- 41 GBcreate index on test (x);select pg_size_pretty(pg_total_relation_size('test')); pg_size_pretty ---------------- 71 GBselect max(ctid) from test;     max      -------------- (5405405,75)delete from test where ctid <  '(100000,0)'::tid;delete from test where ctid > '(5300000,0)'::tid;vacuum (verbose, truncate off) test;both:INFO:  vacuuming \"john.naylor.public.test\"INFO:  finished vacuuming \"john.naylor.public.test\": index scans: 1index scan needed: 205406 pages from table (3.80% of total) had 38000000 dead item identifiers removed--master: system usage: CPU: user: 134.32 s, system: 19.24 s, elapsed: 286.14 sv29 patch:system usage: CPU: user:  97.71 s, system: 45.78 s, elapsed: 573.94 sThe entire vacuum took 25% less wall clock time. Reminder that this is without wal logging, and also unscientific because only one run.--I took 10 seconds of perf data while index vacuuming was going on (showing calls > 2%):master:  40.59%  postgres  postgres            [.] vac_cmp_itemptr  24.97%  postgres  libc-2.17.so        [.] bsearch   6.67%  postgres  postgres            [.] btvacuumpage   4.61%  postgres  [kernel.kallsyms]   [k] copy_user_enhanced_fast_string   3.48%  postgres  postgres            [.] PageIndexMultiDelete   2.67%  postgres  postgres            [.] vac_tid_reaped   2.03%  postgres  postgres            [.] compactify_tuples   2.01%  postgres  libc-2.17.so        [.] 
__memcpy_ssse3_backv29 patch:  29.22%  postgres  postgres            [.] TidStoreIsMember   9.30%  postgres  postgres            [.] btvacuumpage   7.76%  postgres  postgres            [.] PageIndexMultiDelete   6.31%  postgres  [kernel.kallsyms]   [k] copy_user_enhanced_fast_string   5.60%  postgres  postgres            [.] compactify_tuples   4.26%  postgres  libc-2.17.so        [.] __memcpy_ssse3_back   4.12%  postgres  postgres            [.] hash_search_with_hash_value--master:psql -c \"select phase, heap_blks_total, heap_blks_scanned, max_dead_tuples, num_dead_tuples from pg_stat_progress_vacuum\"       phase       | heap_blks_total | heap_blks_scanned | max_dead_tuples | num_dead_tuples -------------------+-----------------+-------------------+-----------------+----------------- vacuuming indexes |         5405406 |           5405406 |       178956969 |        38000000v29 patch:psql  -c \"select phase, heap_blks_total, heap_blks_scanned, max_dead_tuple_bytes, dead_tuple_bytes from pg_stat_progress_vacuum\"       phase       | heap_blks_total | heap_blks_scanned | max_dead_tuple_bytes | dead_tuple_bytes -------------------+-----------------+-------------------+----------------------+------------------ vacuuming indexes |         5405406 |           5405406 |           1073670144 |          8678064Here, the old array pessimistically needs 1GB allocated (as for any table > ~5GB), but only fills 228MB for tid lookup. The patch reports 8.7MB. Tables that only fit, say, 30-50 tuples per page will have less extreme differences in memory use. Same for the case where only a couple dead items occur per page, with many uninteresting pages in between. 
Even so, the allocation will be much more accurately sized in the patch, especially in non-parallel vacuum.There are other cases that could be tested (I mentioned some above), but this is enough to show the improvements possible.I still need to do some cosmetic follow-up to v29 as well as a status report, and I will try to get back to that soon. --John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Thu, 23 Feb 2023 16:40:58 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Feb 22, 2023 at 6:55 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Wed, Feb 22, 2023 at 3:29 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Feb 22, 2023 at 4:35 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > I don't think any vacuum calls in regression tests would stress any of this code very much, so it's not worth carrying the old way forward. I was thinking of only doing this as a short-time sanity check for testing a real-world workload.\n> >\n> > I guess that It would also be helpful at least until the GA release.\n> > People will be able to test them easily on their workloads or their\n> > custom test scenarios.\n>\n> That doesn't seem useful to me. If we've done enough testing to reassure us the new way always gives the same answer, the old way is not needed at commit time. If there is any doubt it will always give the same answer, then the whole patchset won't be committed.\n\nTrue. Even if we're done enough testing we cannot claim there is no\nbug. My idea is to make the bug investigation easier but on\nreflection, it seems not the best idea given this purpose. Instead, it\nseems to be better to add more necessary assertions. What do you think\nabout the attached patch? 
Please note that it also includes the\nchanges for minimum memory requirement.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Fri, 24 Feb 2023 14:50:19 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Feb 23, 2023 at 6:41 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> I ran a couple \"in situ\" tests on server hardware using UUID columns, since they are common in the real world and have bad correlation to heap order, so are a challenge for index vacuum.\n\nThank you for the test!\n\n>\n> === test 1, delete everything from a small table, with very small maintenance_work_mem:\n>\n> alter system set shared_buffers ='4GB';\n> alter system set max_wal_size ='10GB';\n> alter system set checkpoint_timeout ='30 min';\n> alter system set autovacuum =off;\n>\n> -- unrealistically low\n> alter system set maintenance_work_mem = '32MB';\n>\n> create table if not exists test (x uuid);\n> truncate table test;\n> insert into test (x) select gen_random_uuid() from generate_series(1,50*1000*1000);\n> create index on test (x);\n>\n> delete from test;\n> vacuum (verbose, truncate off) test;\n> --\n>\n> master:\n> INFO: finished vacuuming \"john.naylor.public.test\": index scans: 9\n> system usage: CPU: user: 70.04 s, system: 19.85 s, elapsed: 802.06 s\n>\n> v29 patch:\n> INFO: finished vacuuming \"john.naylor.public.test\": index scans: 1\n> system usage: CPU: user: 9.80 s, system: 2.62 s, elapsed: 36.68 s\n>\n> This is a bit artificial, but it's easy to construct cases where the array leads to multiple index scans but the new tid store can fit everythin without breaking a sweat. I didn't save the progress reporting, but v29 was using about 11MB for tid storage.\n\nCool.\n\n>\n>\n> === test 2: try to stress tid lookup with production maintenance_work_mem:\n> 1. 
use unlogged table to reduce noise\n> 2. vacuum freeze first to reduce heap scan time\n> 3. delete some records at the beginning and end of heap to defeat binary search's pre-check\n>\n> alter system set shared_buffers ='4GB';\n> alter system set max_wal_size ='10GB';\n> alter system set checkpoint_timeout ='30 min';\n> alter system set autovacuum =off;\n>\n> alter system set maintenance_work_mem = '1GB';\n>\n> create unlogged table if not exists test (x uuid);\n> truncate table test;\n> insert into test (x) select gen_random_uuid() from generate_series(1,1000*1000*1000);\n> vacuum_freeze test;\n>\n> select pg_size_pretty(pg_table_size('test'));\n> pg_size_pretty\n> ----------------\n> 41 GB\n>\n> create index on test (x);\n>\n> select pg_size_pretty(pg_total_relation_size('test'));\n> pg_size_pretty\n> ----------------\n> 71 GB\n>\n> select max(ctid) from test;\n> max\n> --------------\n> (5405405,75)\n>\n> delete from test where ctid < '(100000,0)'::tid;\n> delete from test where ctid > '(5300000,0)'::tid;\n>\n> vacuum (verbose, truncate off) test;\n>\n> both:\n> INFO: vacuuming \"john.naylor.public.test\"\n> INFO: finished vacuuming \"john.naylor.public.test\": index scans: 1\n> index scan needed: 205406 pages from table (3.80% of total) had 38000000 dead item identifiers removed\n>\n> --\n> master:\n> system usage: CPU: user: 134.32 s, system: 19.24 s, elapsed: 286.14 s\n>\n> v29 patch:\n> system usage: CPU: user: 97.71 s, system: 45.78 s, elapsed: 573.94 s\n\nIn v29 vacuum took twice as long (286 s vs. 573 s)?\n\n>\n> The entire vacuum took 25% less wall clock time. Reminder that this is without wal logging, and also unscientific because only one run.\n>\n> --\n> I took 10 seconds of perf data while index vacuuming was going on (showing calls > 2%):\n>\n> master:\n> 40.59% postgres postgres [.] vac_cmp_itemptr\n> 24.97% postgres libc-2.17.so [.] bsearch\n> 6.67% postgres postgres [.] 
btvacuumpage\n> 4.61% postgres [kernel.kallsyms] [k] copy_user_enhanced_fast_string\n> 3.48% postgres postgres [.] PageIndexMultiDelete\n> 2.67% postgres postgres [.] vac_tid_reaped\n> 2.03% postgres postgres [.] compactify_tuples\n> 2.01% postgres libc-2.17.so [.] __memcpy_ssse3_back\n>\n> v29 patch:\n>\n> 29.22% postgres postgres [.] TidStoreIsMember\n> 9.30% postgres postgres [.] btvacuumpage\n> 7.76% postgres postgres [.] PageIndexMultiDelete\n> 6.31% postgres [kernel.kallsyms] [k] copy_user_enhanced_fast_string\n> 5.60% postgres postgres [.] compactify_tuples\n> 4.26% postgres libc-2.17.so [.] __memcpy_ssse3_back\n> 4.12% postgres postgres [.] hash_search_with_hash_value\n>\n> --\n> master:\n> psql -c \"select phase, heap_blks_total, heap_blks_scanned, max_dead_tuples, num_dead_tuples from pg_stat_progress_vacuum\"\n> phase | heap_blks_total | heap_blks_scanned | max_dead_tuples | num_dead_tuples\n> -------------------+-----------------+-------------------+-----------------+-----------------\n> vacuuming indexes | 5405406 | 5405406 | 178956969 | 38000000\n>\n> v29 patch:\n> psql -c \"select phase, heap_blks_total, heap_blks_scanned, max_dead_tuple_bytes, dead_tuple_bytes from pg_stat_progress_vacuum\"\n> phase | heap_blks_total | heap_blks_scanned | max_dead_tuple_bytes | dead_tuple_bytes\n> -------------------+-----------------+-------------------+----------------------+------------------\n> vacuuming indexes | 5405406 | 5405406 | 1073670144 | 8678064\n>\n> Here, the old array pessimistically needs 1GB allocated (as for any table > ~5GB), but only fills 228MB for tid lookup. The patch reports 8.7MB. Tables that only fit, say, 30-50 tuples per page will have less extreme differences in memory use. Same for the case where only a couple dead items occur per page, with many uninteresting pages in between. 
Even so, the allocation will be much more accurately sized in the patch, especially in non-parallel vacuum.\n\nAgreed.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 24 Feb 2023 17:40:27 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Feb 24, 2023 at 3:41 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> In v29 vacuum took twice as long (286 s vs. 573 s)?\n\nNot sure what happened there, and clearly I was looking at the wrong number\n:/\nI scripted the test for reproducibility and ran it three times. Also\nincluded some variations (attached):\n\nUUID times look comparable here, so no speedup or regression:\n\nmaster:\nsystem usage: CPU: user: 216.05 s, system: 35.81 s, elapsed: 634.22 s\nsystem usage: CPU: user: 173.71 s, system: 31.24 s, elapsed: 599.04 s\nsystem usage: CPU: user: 171.16 s, system: 30.21 s, elapsed: 583.21 s\n\nv29:\nsystem usage: CPU: user: 93.47 s, system: 40.92 s, elapsed: 594.10 s\nsystem usage: CPU: user: 99.58 s, system: 44.73 s, elapsed: 606.80 s\nsystem usage: CPU: user: 96.29 s, system: 42.74 s, elapsed: 600.10 s\n\nThen, I tried sequential integers, which is a much more favorable access\npattern in general, and the new tid storage shows substantial improvement:\n\nmaster:\nsystem usage: CPU: user: 100.39 s, system: 7.79 s, elapsed: 121.57 s\nsystem usage: CPU: user: 104.90 s, system: 8.81 s, elapsed: 124.24 s\nsystem usage: CPU: user: 95.04 s, system: 7.55 s, elapsed: 116.44 s\n\nv29:\nsystem usage: CPU: user: 24.57 s, system: 8.53 s, elapsed: 61.07 s\nsystem usage: CPU: user: 23.18 s, system: 8.25 s, elapsed: 58.99 s\nsystem usage: CPU: user: 23.20 s, system: 8.98 s, elapsed: 66.86 s\n\nThat's fast enough that I thought an improvement would show up even with\nstandard WAL logging (no separate attachment, since it's a trivial 
change).\nSeems a bit faster:\n\nmaster:\nsystem usage: CPU: user: 152.27 s, system: 11.76 s, elapsed: 216.86 s\nsystem usage: CPU: user: 137.25 s, system: 11.07 s, elapsed: 213.62 s\nsystem usage: CPU: user: 149.48 s, system: 12.15 s, elapsed: 220.96 s\n\nv29:\nsystem usage: CPU: user: 40.88 s, system: 15.99 s, elapsed: 170.98 s\nsystem usage: CPU: user: 41.33 s, system: 15.45 s, elapsed: 166.75 s\nsystem usage: CPU: user: 41.51 s, system: 18.20 s, elapsed: 203.94 s\n\nThere is more we could test here, but I feel better about these numbers.\n\nIn the next few days, I'll resume style review and list the remaining\nissues we need to address.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Tue, 28 Feb 2023 00:07:23 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Feb 24, 2023 at 12:50 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Wed, Feb 22, 2023 at 6:55 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > That doesn't seem useful to me. If we've done enough testing to\nreassure us the new way always gives the same answer, the old way is not\nneeded at commit time. If there is any doubt it will always give the same\nanswer, then the whole patchset won't be committed.\n\n> My idea is to make the bug investigation easier but on\n> reflection, it seems not the best idea given this purpose.\n\nMy concern with TIDSTORE_DEBUG is that it adds new code that mimics the old\ntid array. As I've said, that doesn't seem like a good thing to carry\nforward forevermore, in any form. Plus, comparing new code with new code is\nnot the same thing as comparing existing code with new code. That was my\nidea upthread.\n\nMaybe the effort my idea requires is too much vs. the likelihood of finding\na problem. 
In any case, it's clear that if I want that level of paranoia,\nI'm going to have to do it myself.\n\n> What do you think\n> about the attached patch? Please note that it also includes the\n> changes for minimum memory requirement.\n\nMost of the asserts look logical, or at least harmless.\n\n- int max_off; /* the maximum offset number */\n+ OffsetNumber max_off; /* the maximum offset number */\n\nI agree with using the specific type for offsets here, but I'm not sure why\nthis change belongs in this patch. If we decided against the new asserts,\nthis would be easy to lose.\n\nThis change, however, defies common sense:\n\n+/*\n+ * The minimum amount of memory required by TidStore is 2MB, the current\nminimum\n+ * valid value for the maintenance_work_mem GUC. This is required to\nallocate the\n+ * DSA initial segment, 1MB, and some meta data. This number is applied\nalso to\n+ * the local TidStore cases for simplicity.\n+ */\n+#define TIDSTORE_MIN_MEMORY (2 * 1024 * 1024L) /* 2MB */\n\n+ /* Sanity check for the max_bytes */\n+ if (max_bytes < TIDSTORE_MIN_MEMORY)\n+ elog(ERROR, \"memory for TidStore must be at least %ld, but %zu provided\",\n+ TIDSTORE_MIN_MEMORY, max_bytes);\n\nAside from the fact that this elog's something that would never get past\ndevelopment, the #define just adds a hard-coded copy of something that is\nalready hard-coded somewhere else, whose size depends on an implementation\ndetail in a third place.\n\nThis also assumes that all users of tid store are limited by\nmaintenance_work_mem. Andres thought of an example of some day unifying\nwith tidbitmap.c, and maybe other applications will be limited by work_mem.\n\nBut now that I'm looking at the guc tables, I am reminded that work_mem's\nminimum is 64kB, so this highlights a design problem: There is obviously no\nrequirement that the minimum work_mem has to be >= a single DSA segment,\neven though operations like parallel hash and parallel bitmap heap scan are\nlimited by work_mem. 
It would be nice to find out what happens with these\nparallel features when work_mem is tiny (maybe parallelism is not even\nconsidered?).\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Fri, Feb 24, 2023 at 12:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Wed, Feb 22, 2023 at 6:55 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> >> > That doesn't seem useful to me. If we've done enough testing to reassure us the new way always gives the same answer, the old way is not needed at commit time. If there is any doubt it will always give the same answer, then the whole patchset won't be committed.> My idea is to make the bug investigation easier but on> reflection, it seems not the best idea given this purpose.My concern with TIDSTORE_DEBUG is that it adds new code that mimics the old tid array. As I've said, that doesn't seem like a good thing to carry forward forevermore, in any form. Plus, comparing new code with new code is not the same thing as comparing existing code with new code. That was my idea upthread.Maybe the effort my idea requires is too much vs. the likelihood of finding a problem. In any case, it's clear that if I want that level of paranoia, I'm going to have to do it myself.> What do you think> about the attached patch? Please note that it also includes the> changes for minimum memory requirement.Most of the asserts look logical, or at least harmless.-\tint\t\tmax_off;\t\t/* the maximum offset number */+\tOffsetNumber\tmax_off;\t\t/* the maximum offset number */I agree with using the specific type for offsets here, but I'm not sure why this change belongs in this patch. If we decided against the new asserts, this would be easy to lose.This change, however, defies common sense:+/*+ * The minimum amount of memory required by TidStore is 2MB, the current minimum+ * valid value for the maintenance_work_mem GUC. This is required to allocate the+ * DSA initial segment, 1MB, and some meta data. 
This number is applied also to+ * the local TidStore cases for simplicity.+ */+#define TIDSTORE_MIN_MEMORY\t(2 * 1024 * 1024L)\t\t/* 2MB */+\t/* Sanity check for the max_bytes */+\tif (max_bytes < TIDSTORE_MIN_MEMORY)+\t\telog(ERROR, \"memory for TidStore must be at least %ld, but %zu provided\",+\t\t\t TIDSTORE_MIN_MEMORY, max_bytes);Aside from the fact that this elog's something that would never get past development, the #define just adds a hard-coded copy of something that is already hard-coded somewhere else, whose size depends on an implementation detail in a third place.This also assumes that all users of tid store are limited by maintenance_work_mem. Andres thought of an example of some day unifying with tidbitmap.c, and maybe other applications will be limited by work_mem.But now that I'm looking at the guc tables, I am reminded that work_mem's minimum is 64kB, so this highlights a design problem: There is obviously no requirement that the minimum work_mem has to be >= a single DSA segment, even though operations like parallel hash and parallel bitmap heap scan are limited by work_mem. It would be nice to find out what happens with these parallel features when work_mem is tiny (maybe parallelism is not even considered?). --John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 28 Feb 2023 13:42:27 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Feb 28, 2023 at 3:42 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Fri, Feb 24, 2023 at 12:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Feb 22, 2023 at 6:55 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > That doesn't seem useful to me. If we've done enough testing to reassure us the new way always gives the same answer, the old way is not needed at commit time. 
If there is any doubt it will always give the same answer, then the whole patchset won't be committed.\n>\n> > My idea is to make the bug investigation easier but on\n> > reflection, it seems not the best idea given this purpose.\n>\n> My concern with TIDSTORE_DEBUG is that it adds new code that mimics the old tid array. As I've said, that doesn't seem like a good thing to carry forward forevermore, in any form. Plus, comparing new code with new code is not the same thing as comparing existing code with new code. That was my idea upthread.\n>\n> Maybe the effort my idea requires is too much vs. the likelihood of finding a problem. In any case, it's clear that if I want that level of paranoia, I'm going to have to do it myself.\n>\n> > What do you think\n> > about the attached patch? Please note that it also includes the\n> > changes for minimum memory requirement.\n>\n> Most of the asserts look logical, or at least harmless.\n>\n> - int max_off; /* the maximum offset number */\n> + OffsetNumber max_off; /* the maximum offset number */\n>\n> I agree with using the specific type for offsets here, but I'm not sure why this change belongs in this patch. If we decided against the new asserts, this would be easy to lose.\n\nRight. I'll separate this change as a separate patch.\n\n>\n> This change, however, defies common sense:\n>\n> +/*\n> + * The minimum amount of memory required by TidStore is 2MB, the current minimum\n> + * valid value for the maintenance_work_mem GUC. This is required to allocate the\n> + * DSA initial segment, 1MB, and some meta data. 
This number is applied also to\n> + * the local TidStore cases for simplicity.\n> + */\n> +#define TIDSTORE_MIN_MEMORY (2 * 1024 * 1024L) /* 2MB */\n>\n> + /* Sanity check for the max_bytes */\n> + if (max_bytes < TIDSTORE_MIN_MEMORY)\n> + elog(ERROR, \"memory for TidStore must be at least %ld, but %zu provided\",\n> + TIDSTORE_MIN_MEMORY, max_bytes);\n>\n> Aside from the fact that this elog's something that would never get past development, the #define just adds a hard-coded copy of something that is already hard-coded somewhere else, whose size depends on an implementation detail in a third place.\n>\n> This also assumes that all users of tid store are limited by maintenance_work_mem. Andres thought of an example of some day unifying with tidbitmap.c, and maybe other applications will be limited by work_mem.\n>\n> But now that I'm looking at the guc tables, I am reminded that work_mem's minimum is 64kB, so this highlights a design problem: There is obviously no requirement that the minimum work_mem has to be >= a single DSA segment, even though operations like parallel hash and parallel bitmap heap scan are limited by work_mem.\n\nRight.\n\n> It would be nice to find out what happens with these parallel features when work_mem is tiny (maybe parallelism is not even considered?).\n\nIIUC both don't care about the allocated DSA segment size. Parallel\nhash accounts actual tuple (+ header) size as used memory but doesn't\nconsider how much DSA segment is allocated behind. Both parallel hash\nand parallel bitmap scan can work even with work_mem = 64kB, but when\nchecking the total DSA segment size allocated during these operations,\nit was 1MB.\n\nI realized that there is a similar memory limit design issue also on\nthe non-shared tidstore cases. We deduct 70kB from max_bytes but it\nwon't work fine with work_mem = 64kB. Probably we need to reconsider\nit. 
FYI 70kB comes from the maximum slab block size for node256.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 28 Feb 2023 22:20:17 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Feb 28, 2023 at 10:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Tue, Feb 28, 2023 at 3:42 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> > On Fri, Feb 24, 2023 at 12:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Wed, Feb 22, 2023 at 6:55 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> > > >\n> > > > That doesn't seem useful to me. If we've done enough testing to reassure us the new way always gives the same answer, the old way is not needed at commit time. If there is any doubt it will always give the same answer, then the whole patchset won't be committed.\n> >\n> > > My idea is to make the bug investigation easier but on\n> > > reflection, it seems not the best idea given this purpose.\n> >\n> > My concern with TIDSTORE_DEBUG is that it adds new code that mimics the old tid array. As I've said, that doesn't seem like a good thing to carry forward forevermore, in any form. Plus, comparing new code with new code is not the same thing as comparing existing code with new code. That was my idea upthread.\n> >\n> > Maybe the effort my idea requires is too much vs. the likelihood of finding a problem. In any case, it's clear that if I want that level of paranoia, I'm going to have to do it myself.\n> >\n> > > What do you think\n> > > about the attached patch? 
Please note that it also includes the\n> > > changes for minimum memory requirement.\n> >\n> > Most of the asserts look logical, or at least harmless.\n> >\n> > - int max_off; /* the maximum offset number */\n> > + OffsetNumber max_off; /* the maximum offset number */\n> >\n> > I agree with using the specific type for offsets here, but I'm not sure why this change belongs in this patch. If we decided against the new asserts, this would be easy to lose.\n>\n> Right. I'll separate this change as a separate patch.\n>\n> >\n> > This change, however, defies common sense:\n> >\n> > +/*\n> > + * The minimum amount of memory required by TidStore is 2MB, the current minimum\n> > + * valid value for the maintenance_work_mem GUC. This is required to allocate the\n> > + * DSA initial segment, 1MB, and some meta data. This number is applied also to\n> > + * the local TidStore cases for simplicity.\n> > + */\n> > +#define TIDSTORE_MIN_MEMORY (2 * 1024 * 1024L) /* 2MB */\n> >\n> > + /* Sanity check for the max_bytes */\n> > + if (max_bytes < TIDSTORE_MIN_MEMORY)\n> > + elog(ERROR, \"memory for TidStore must be at least %ld, but %zu provided\",\n> > + TIDSTORE_MIN_MEMORY, max_bytes);\n> >\n> > Aside from the fact that this elog's something that would never get past development, the #define just adds a hard-coded copy of something that is already hard-coded somewhere else, whose size depends on an implementation detail in a third place.\n> >\n> > This also assumes that all users of tid store are limited by maintenance_work_mem. 
Andres thought of an example of some day unifying with tidbitmap.c, and maybe other applications will be limited by work_mem.\n> >\n> > But now that I'm looking at the guc tables, I am reminded that work_mem's minimum is 64kB, so this highlights a design problem: There is obviously no requirement that the minimum work_mem has to be >= a single DSA segment, even though operations like parallel hash and parallel bitmap heap scan are limited by work_mem.\n>\n> Right.\n>\n> > It would be nice to find out what happens with these parallel features when work_mem is tiny (maybe parallelism is not even considered?).\n>\n> IIUC both don't care about the allocated DSA segment size. Parallel\n> hash accounts actual tuple (+ header) size as used memory but doesn't\n> consider how much DSA segment is allocated behind. Both parallel hash\n> and parallel bitmap scan can work even with work_mem = 64kB, but when\n> checking the total DSA segment size allocated during these operations,\n> it was 1MB.\n>\n> I realized that there is a similar memory limit design issue also on\n> the non-shared tidstore cases. We deduct 70kB from max_bytes but it\n> won't work fine with work_mem = 64kB. Probably we need to reconsider\n> it. FYI 70kB comes from the maximum slab block size for node256.\n\nCurrently, we calculate the slab block size enough to allocate 32\nchunks from there. For node256, the leaf node is 2,088 bytes and the\nslab block size is 66,816 bytes. One idea to fix this issue to\ndecrease it. For example, with 16 chunks the slab block size is 33,408\nbytes and with 8 chunks it's 16,704 bytes. 
I ran a brief benchmark\ntest with 70kB block size and 16kB block size:\n\n* 70kB slab blocks:\nselect * from bench_search_random_nodes(20 * 1000 * 1000, '0xFFFFFF');\nheight = 2, n3 = 0, n15 = 0, n32 = 0, n125 = 0, n256 = 65793\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 143085184 | 1216 | 750\n(1 row)\n\n* 16kB slab blocks:\nselect * from bench_search_random_nodes(20 * 1000 * 1000, '0xFFFFFF');\nheight = 2, n3 = 0, n15 = 0, n32 = 0, n125 = 0, n256 = 65793\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 157601248 | 1220 | 786\n(1 row)\n\nThere is a performance difference a bit but a smaller slab block size\nseems to be acceptable if there is no other better way.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 1 Mar 2023 00:09:16 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Feb 28, 2023 at 10:09 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Tue, Feb 28, 2023 at 10:20 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> >\n> > On Tue, Feb 28, 2023 at 3:42 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > >\n> > > On Fri, Feb 24, 2023 at 12:50 PM Masahiko Sawada <\nsawada.mshk@gmail.com> wrote:\n> > > >\n> > > > On Wed, Feb 22, 2023 at 6:55 PM John Naylor\n> > > > <john.naylor@enterprisedb.com> wrote:\n> > > > >\n> > > > > That doesn't seem useful to me. If we've done enough testing to\nreassure us the new way always gives the same answer, the old way is not\nneeded at commit time. 
If there is any doubt it will always give the same\nanswer, then the whole patchset won't be committed.\n> > >\n> > > > My idea is to make the bug investigation easier but on\n> > > > reflection, it seems not the best idea given this purpose.\n> > >\n> > > My concern with TIDSTORE_DEBUG is that it adds new code that mimics\nthe old tid array. As I've said, that doesn't seem like a good thing to\ncarry forward forevermore, in any form. Plus, comparing new code with new\ncode is not the same thing as comparing existing code with new code. That\nwas my idea upthread.\n> > >\n> > > Maybe the effort my idea requires is too much vs. the likelihood of\nfinding a problem. In any case, it's clear that if I want that level of\nparanoia, I'm going to have to do it myself.\n> > >\n> > > > What do you think\n> > > > about the attached patch? Please note that it also includes the\n> > > > changes for minimum memory requirement.\n> > >\n> > > Most of the asserts look logical, or at least harmless.\n> > >\n> > > - int max_off; /* the maximum offset number */\n> > > + OffsetNumber max_off; /* the maximum offset number */\n> > >\n> > > I agree with using the specific type for offsets here, but I'm not\nsure why this change belongs in this patch. If we decided against the new\nasserts, this would be easy to lose.\n> >\n> > Right. I'll separate this change as a separate patch.\n> >\n> > >\n> > > This change, however, defies common sense:\n> > >\n> > > +/*\n> > > + * The minimum amount of memory required by TidStore is 2MB, the\ncurrent minimum\n> > > + * valid value for the maintenance_work_mem GUC. This is required to\nallocate the\n> > > + * DSA initial segment, 1MB, and some meta data. 
This number is\napplied also to\n> > > + * the local TidStore cases for simplicity.\n> > > + */\n> > > +#define TIDSTORE_MIN_MEMORY (2 * 1024 * 1024L) /* 2MB */\n> > >\n> > > + /* Sanity check for the max_bytes */\n> > > + if (max_bytes < TIDSTORE_MIN_MEMORY)\n> > > + elog(ERROR, \"memory for TidStore must be at least %ld, but %zu\nprovided\",\n> > > + TIDSTORE_MIN_MEMORY, max_bytes);\n> > >\n> > > Aside from the fact that this elog's something that would never get\npast development, the #define just adds a hard-coded copy of something that\nis already hard-coded somewhere else, whose size depends on an\nimplementation detail in a third place.\n> > >\n> > > This also assumes that all users of tid store are limited by\nmaintenance_work_mem. Andres thought of an example of some day unifying\nwith tidbitmap.c, and maybe other applications will be limited by work_mem.\n> > >\n> > > But now that I'm looking at the guc tables, I am reminded that\nwork_mem's minimum is 64kB, so this highlights a design problem: There is\nobviously no requirement that the minimum work_mem has to be >= a single\nDSA segment, even though operations like parallel hash and parallel bitmap\nheap scan are limited by work_mem.\n> >\n> > Right.\n> >\n> > > It would be nice to find out what happens with these parallel\nfeatures when work_mem is tiny (maybe parallelism is not even considered?).\n> >\n> > IIUC both don't care about the allocated DSA segment size. Parallel\n> > hash accounts actual tuple (+ header) size as used memory but doesn't\n> > consider how much DSA segment is allocated behind. Both parallel hash\n> > and parallel bitmap scan can work even with work_mem = 64kB, but when\n> > checking the total DSA segment size allocated during these operations,\n> > it was 1MB.\n> >\n> > I realized that there is a similar memory limit design issue also on\n> > the non-shared tidstore cases. We deduct 70kB from max_bytes but it\n> > won't work fine with work_mem = 64kB. 
Probably we need to reconsider\n> > it. FYI 70kB comes from the maximum slab block size for node256.\n>\n> Currently, we calculate the slab block size enough to allocate 32\n> chunks from there. For node256, the leaf node is 2,088 bytes and the\n> slab block size is 66,816 bytes. One idea to fix this issue to\n> decrease it.\n\nI think we're trying to solve the wrong problem here. I need to study this\nmore, but it seems that code that needs to stay within a memory limit only\nneeds to track what's been allocated in chunks within a block, since\nwriting there is what invokes a page fault. If we're not keeping track of\neach and every chunk space, for speed, it doesn't follow that we need to\nkeep every block allocation within the configured limit. I'm guessing we\ncan just ask the context if the block space has gone *over* the limit, and\nwe can assume that the last allocation we perform will only fault one\nadditional page. We need to have a clear answer on this before doing\nanything else.\n\nIf that's correct, and I'm not positive yet, we can get rid of all the\nfragile assumptions about things the tid store has no business knowing\nabout, as well as the guc change. I'm not sure how this affects progress\nreporting, because it would be nice if it didn't report dead_tuple_bytes\nbigger than max_dead_tuple_bytes.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Tue, Feb 28, 2023 at 10:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Tue, Feb 28, 2023 at 10:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> >> > On Tue, Feb 28, 2023 at 3:42 PM John Naylor> > <john.naylor@enterprisedb.com> wrote:> > >> > >> > > On Fri, Feb 24, 2023 at 12:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > > >> > > > On Wed, Feb 22, 2023 at 6:55 PM John Naylor> > > > <john.naylor@enterprisedb.com> wrote:> > > > >> > > > > That doesn't seem useful to me. 
If we've done enough testing to reassure us the new way always gives the same answer, the old way is not needed at commit time. If there is any doubt it will always give the same answer, then the whole patchset won't be committed.> > >> > > > My idea is to make the bug investigation easier but on> > > > reflection, it seems not the best idea given this purpose.> > >> > > My concern with TIDSTORE_DEBUG is that it adds new code that mimics the old tid array. As I've said, that doesn't seem like a good thing to carry forward forevermore, in any form. Plus, comparing new code with new code is not the same thing as comparing existing code with new code. That was my idea upthread.> > >> > > Maybe the effort my idea requires is too much vs. the likelihood of finding a problem. In any case, it's clear that if I want that level of paranoia, I'm going to have to do it myself.> > >> > > > What do you think> > > > about the attached patch? Please note that it also includes the> > > > changes for minimum memory requirement.> > >> > > Most of the asserts look logical, or at least harmless.> > >> > > - int max_off; /* the maximum offset number */> > > + OffsetNumber max_off; /* the maximum offset number */> > >> > > I agree with using the specific type for offsets here, but I'm not sure why this change belongs in this patch. If we decided against the new asserts, this would be easy to lose.> >> > Right. I'll separate this change as a separate patch.> >> > >> > > This change, however, defies common sense:> > >> > > +/*> > > + * The minimum amount of memory required by TidStore is 2MB, the current minimum> > > + * valid value for the maintenance_work_mem GUC. This is required to allocate the> > > + * DSA initial segment, 1MB, and some meta data. 
This number is applied also to> > > + * the local TidStore cases for simplicity.> > > + */> > > +#define TIDSTORE_MIN_MEMORY (2 * 1024 * 1024L) /* 2MB */> > >> > > + /* Sanity check for the max_bytes */> > > + if (max_bytes < TIDSTORE_MIN_MEMORY)> > > + elog(ERROR, \"memory for TidStore must be at least %ld, but %zu provided\",> > > + TIDSTORE_MIN_MEMORY, max_bytes);> > >> > > Aside from the fact that this elog's something that would never get past development, the #define just adds a hard-coded copy of something that is already hard-coded somewhere else, whose size depends on an implementation detail in a third place.> > >> > > This also assumes that all users of tid store are limited by maintenance_work_mem. Andres thought of an example of some day unifying with tidbitmap.c, and maybe other applications will be limited by work_mem.> > >> > > But now that I'm looking at the guc tables, I am reminded that work_mem's minimum is 64kB, so this highlights a design problem: There is obviously no requirement that the minimum work_mem has to be >= a single DSA segment, even though operations like parallel hash and parallel bitmap heap scan are limited by work_mem.> >> > Right.> >> > >  It would be nice to find out what happens with these parallel features when work_mem is tiny (maybe parallelism is not even considered?).> >> > IIUC both don't care about the allocated DSA segment size. Parallel> > hash accounts actual tuple (+ header) size as used memory but doesn't> > consider how much DSA segment is allocated behind. Both parallel hash> > and parallel bitmap scan can work even with work_mem = 64kB, but when> > checking the total DSA segment size allocated during these operations,> > it was 1MB.> >> > I realized that there is a similar memory limit design issue also on> > the non-shared tidstore cases. We deduct 70kB from max_bytes but it> > won't work fine with work_mem = 64kB.  Probably we need to reconsider> > it. 
FYI 70kB comes from the maximum slab block size for node256.>> Currently, we calculate the slab block size enough to allocate 32> chunks from there. For node256, the leaf node is 2,088 bytes and the> slab block size is 66,816 bytes. One idea to fix this issue to> decrease it.I think we're trying to solve the wrong problem here. I need to study this more, but it seems that code that needs to stay within a memory limit only needs to track what's been allocated in chunks within a block, since writing there is what invokes a page fault. If we're not keeping track of each and every chunk space, for speed, it doesn't follow that we need to keep every block allocation within the configured limit. I'm guessing we can just ask the context if the block space has gone *over* the limit, and we can assume that the last allocation we perform will only fault one additional page. We need to have a clear answer on this before doing anything else.If that's correct, and I'm not positive yet, we can get rid of all the fragile assumptions about things the tid store has no business knowing about, as well as the guc change. 
I'm not sure how this affects progress reporting, because it would be nice if it didn't report dead_tuple_bytes bigger than max_dead_tuple_bytes.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 1 Mar 2023 13:37:01 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 1, 2023 at 3:37 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Tue, Feb 28, 2023 at 10:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Tue, Feb 28, 2023 at 10:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Tue, Feb 28, 2023 at 3:42 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> > > >\n> > > >\n> > > > On Fri, Feb 24, 2023 at 12:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > > >\n> > > > > On Wed, Feb 22, 2023 at 6:55 PM John Naylor\n> > > > > <john.naylor@enterprisedb.com> wrote:\n> > > > > >\n> > > > > > That doesn't seem useful to me. If we've done enough testing to reassure us the new way always gives the same answer, the old way is not needed at commit time. If there is any doubt it will always give the same answer, then the whole patchset won't be committed.\n> > > >\n> > > > > My idea is to make the bug investigation easier but on\n> > > > > reflection, it seems not the best idea given this purpose.\n> > > >\n> > > > My concern with TIDSTORE_DEBUG is that it adds new code that mimics the old tid array. As I've said, that doesn't seem like a good thing to carry forward forevermore, in any form. Plus, comparing new code with new code is not the same thing as comparing existing code with new code. That was my idea upthread.\n> > > >\n> > > > Maybe the effort my idea requires is too much vs. the likelihood of finding a problem. 
In any case, it's clear that if I want that level of paranoia, I'm going to have to do it myself.\n> > > >\n> > > > > What do you think\n> > > > > about the attached patch? Please note that it also includes the\n> > > > > changes for minimum memory requirement.\n> > > >\n> > > > Most of the asserts look logical, or at least harmless.\n> > > >\n> > > > - int max_off; /* the maximum offset number */\n> > > > + OffsetNumber max_off; /* the maximum offset number */\n> > > >\n> > > > I agree with using the specific type for offsets here, but I'm not sure why this change belongs in this patch. If we decided against the new asserts, this would be easy to lose.\n> > >\n> > > Right. I'll separate this change as a separate patch.\n> > >\n> > > >\n> > > > This change, however, defies common sense:\n> > > >\n> > > > +/*\n> > > > + * The minimum amount of memory required by TidStore is 2MB, the current minimum\n> > > > + * valid value for the maintenance_work_mem GUC. This is required to allocate the\n> > > > + * DSA initial segment, 1MB, and some meta data. This number is applied also to\n> > > > + * the local TidStore cases for simplicity.\n> > > > + */\n> > > > +#define TIDSTORE_MIN_MEMORY (2 * 1024 * 1024L) /* 2MB */\n> > > >\n> > > > + /* Sanity check for the max_bytes */\n> > > > + if (max_bytes < TIDSTORE_MIN_MEMORY)\n> > > > + elog(ERROR, \"memory for TidStore must be at least %ld, but %zu provided\",\n> > > > + TIDSTORE_MIN_MEMORY, max_bytes);\n> > > >\n> > > > Aside from the fact that this elog's something that would never get past development, the #define just adds a hard-coded copy of something that is already hard-coded somewhere else, whose size depends on an implementation detail in a third place.\n> > > >\n> > > > This also assumes that all users of tid store are limited by maintenance_work_mem. 
Andres thought of an example of some day unifying with tidbitmap.c, and maybe other applications will be limited by work_mem.\n> > > >\n> > > > But now that I'm looking at the guc tables, I am reminded that work_mem's minimum is 64kB, so this highlights a design problem: There is obviously no requirement that the minimum work_mem has to be >= a single DSA segment, even though operations like parallel hash and parallel bitmap heap scan are limited by work_mem.\n> > >\n> > > Right.\n> > >\n> > > > It would be nice to find out what happens with these parallel features when work_mem is tiny (maybe parallelism is not even considered?).\n> > >\n> > > IIUC both don't care about the allocated DSA segment size. Parallel\n> > > hash accounts actual tuple (+ header) size as used memory but doesn't\n> > > consider how much DSA segment is allocated behind. Both parallel hash\n> > > and parallel bitmap scan can work even with work_mem = 64kB, but when\n> > > checking the total DSA segment size allocated during these operations,\n> > > it was 1MB.\n> > >\n> > > I realized that there is a similar memory limit design issue also on\n> > > the non-shared tidstore cases. We deduct 70kB from max_bytes but it\n> > > won't work fine with work_mem = 64kB. Probably we need to reconsider\n> > > it. FYI 70kB comes from the maximum slab block size for node256.\n> >\n> > Currently, we calculate the slab block size enough to allocate 32\n> > chunks from there. For node256, the leaf node is 2,088 bytes and the\n> > slab block size is 66,816 bytes. One idea to fix this issue to\n> > decrease it.\n>\n> I think we're trying to solve the wrong problem here. I need to study this more, but it seems that code that needs to stay within a memory limit only needs to track what's been allocated in chunks within a block, since writing there is what invokes a page fault.\n\nRight. 
I guess we've discussed what we use for calculating the *used*\nmemory amount but I don't remember.\n\nI think I was confused by the fact that we use some different\napproaches to calculate the amount of used memory. Parallel hash and\ntidbitmap use the allocated chunk size whereas hash_agg_check_limits()\nin nodeAgg.c uses MemoryContextMemAllocated(), which uses the\nallocated block size.\n\n> If we're not keeping track of each and every chunk space, for speed, it doesn't follow that we need to keep every block allocation within the configured limit. I'm guessing we can just ask the context if the block space has gone *over* the limit, and we can assume that the last allocation we perform will only fault one additional page. We need to have a clear answer on this before doing anything else.\n>\n> If that's correct, and I'm not positive yet, we can get rid of all the fragile assumptions about things the tid store has no business knowing about, as well as the guc change.\n\nTrue.\n\n> I'm not sure how this affects progress reporting, because it would be nice if it didn't report dead_tuple_bytes bigger than max_dead_tuple_bytes.\n\nYes, the progress reporting could be confusable. Particularly, in\nshared tidstore cases, the dead_tuple_bytes could be much bigger than\nmax_dead_tuple_bytes. Probably what we need might be functions for\nMemoryContext and dsa_area to get the amount of memory that has been\nallocated, by not tracking every chunk space. 
For example, the\nfunctions would be like what SlabStats() does; iterate over every\nblock and calculates the total/free memory usage.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 1 Mar 2023 20:58:48 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 1, 2023 at 6:59 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Wed, Mar 1, 2023 at 3:37 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> >\n> > I think we're trying to solve the wrong problem here. I need to study\nthis more, but it seems that code that needs to stay within a memory limit\nonly needs to track what's been allocated in chunks within a block, since\nwriting there is what invokes a page fault.\n>\n> Right. I guess we've discussed what we use for calculating the *used*\n> memory amount but I don't remember.\n>\n> I think I was confused by the fact that we use some different\n> approaches to calculate the amount of used memory. Parallel hash and\n> tidbitmap use the allocated chunk size whereas hash_agg_check_limits()\n> in nodeAgg.c uses MemoryContextMemAllocated(), which uses the\n> allocated block size.\n\nThat's good to know. The latter says:\n\n * After adding a new group to the hash table, check whether we need to\nenter\n * spill mode. Allocations may happen without adding new groups (for\ninstance,\n * if the transition state size grows), so this check is imperfect.\n\nI'm willing to claim that vacuum can be imperfect also, given the tid\nstore's properties: 1) on average much more efficient in used space, and 2)\nno longer bound by the 1GB limit.\n\n> > I'm not sure how this affects progress reporting, because it would be\nnice if it didn't report dead_tuple_bytes bigger than max_dead_tuple_bytes.\n>\n> Yes, the progress reporting could be confusable. 
Particularly, in\n> shared tidstore cases, the dead_tuple_bytes could be much bigger than\n> max_dead_tuple_bytes. Probably what we need might be functions for\n> MemoryContext and dsa_area to get the amount of memory that has been\n> allocated, by not tracking every chunk space. For example, the\n> functions would be like what SlabStats() does; iterate over every\n> block and calculates the total/free memory usage.\n\nI'm not sure we need to invent new infrastructure for this. Looking at v29\nin vacuumlazy.c, the order of operations for memory accounting is:\n\nFirst, get the block-level space -- stop and vacuum indexes if we exceed\nthe limit:\n\n/*\n * Consider if we definitely have enough space to process TIDs on page\n * already. If we are close to overrunning the available space for\n * dead_items TIDs, pause and do a cycle of vacuuming before we tackle\n * this page.\n */\nif (TidStoreIsFull(vacrel->dead_items)) --> which is basically \"if\n(TidStoreMemoryUsage(ts) > ts->control->max_bytes)\"\n\nThen, after pruning the current page, store the tids and then get the\nblock-level space again:\n\nelse if (prunestate.num_offsets > 0)\n{\n /* Save details of the LP_DEAD items from the page in dead_items */\n TidStoreSetBlockOffsets(...);\n\n pgstat_progress_update_param(PROGRESS_VACUUM_DEAD_TUPLE_BYTES,\n TidStoreMemoryUsage(dead_items));\n}\n\nSince the block-level measurement is likely overestimating quite a bit, I\npropose to simply reverse the order of the actions here, effectively\nreporting progress for the *last page* and not the current one: First\nupdate progress with the current memory usage, then add tids for this page.\nIf this allocated a new block, only a small bit of that will be written to.\nIf this block pushes it over the limit, we will detect that up at the top\nof the loop. It's kind of like our earlier attempts at a \"fudge factor\",\nbut simpler and less brittle. 
And, as far as OS pages we have actually\nwritten to, I think it'll effectively respect the memory limit, at least in\nthe local mem case. And the numbers will make sense.\n\nThoughts?\n\nBut now that I'm looking more closely at the details of memory accounting,\nI don't like that TidStoreMemoryUsage() is called twice per page pruned\n(see above). Maybe it wouldn't noticeably slow things down, but it's a bit\nsloppy. It seems like we should call it once per loop and save the result\nsomewhere. If that's the right way to go, that possibly indicates that\nTidStoreIsFull() is not a useful interface, at least in this form.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Mar 1, 2023 at 6:59 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Wed, Mar 1, 2023 at 3:37 PM John Naylor <john.naylor@enterprisedb.com> wrote:> >> > I think we're trying to solve the wrong problem here. I need to study this more, but it seems that code that needs to stay within a memory limit only needs to track what's been allocated in chunks within a block, since writing there is what invokes a page fault.>> Right. I guess we've discussed what we use for calculating the *used*> memory amount but I don't remember.>> I think I was confused by the fact that we use some different> approaches to calculate the amount of used memory. Parallel hash and> tidbitmap use the allocated chunk size whereas hash_agg_check_limits()> in nodeAgg.c uses MemoryContextMemAllocated(), which uses the> allocated block size.That's good to know. The latter says: * After adding a new group to the hash table, check whether we need to enter * spill mode. Allocations may happen without adding new groups (for instance, * if the transition state size grows), so this check is imperfect.I'm willing to claim that vacuum can be imperfect also, given the tid store's properties: 1) on average much more efficient in used space, and 2) no longer bound by the 1GB limit. 
> > I'm not sure how this affects progress reporting, because it would be nice if it didn't report dead_tuple_bytes bigger than max_dead_tuple_bytes.>> Yes, the progress reporting could be confusable. Particularly, in> shared tidstore cases, the dead_tuple_bytes could be much bigger than> max_dead_tuple_bytes. Probably what we need might be functions for> MemoryContext and dsa_area to get the amount of memory that has been> allocated, by not tracking every chunk space. For example, the> functions would be like what SlabStats() does; iterate over every> block and calculates the total/free memory usage.I'm not sure we need to invent new infrastructure for this. Looking at v29 in vacuumlazy.c, the order of operations for memory accounting is:First, get the block-level space -- stop and vacuum indexes if we exceed the limit:/* * Consider if we definitely have enough space to process TIDs on page * already.  If we are close to overrunning the available space for * dead_items TIDs, pause and do a cycle of vacuuming before we tackle * this page. */if (TidStoreIsFull(vacrel->dead_items)) --> which is basically \"if (TidStoreMemoryUsage(ts) > ts->control->max_bytes)\"Then, after pruning the current page, store the tids and then get the block-level space again:else if (prunestate.num_offsets > 0){  /* Save details of the LP_DEAD items from the page in dead_items */  TidStoreSetBlockOffsets(...);  pgstat_progress_update_param(PROGRESS_VACUUM_DEAD_TUPLE_BYTES,                               TidStoreMemoryUsage(dead_items));}Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. 
It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.Thoughts?But now that I'm looking more closely at the details of memory accounting, I don't like that TidStoreMemoryUsage() is called twice per page pruned (see above). Maybe it wouldn't noticeably slow things down, but it's a bit sloppy. It seems like we should call it once per loop and save the result somewhere. If that's the right way to go, that possibly indicates that TidStoreIsFull() is not a useful interface, at least in this form.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 3 Mar 2023 18:03:53 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 3, 2023 at 8:04 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Wed, Mar 1, 2023 at 6:59 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Mar 1, 2023 at 3:37 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > I think we're trying to solve the wrong problem here. I need to study this more, but it seems that code that needs to stay within a memory limit only needs to track what's been allocated in chunks within a block, since writing there is what invokes a page fault.\n> >\n> > Right. I guess we've discussed what we use for calculating the *used*\n> > memory amount but I don't remember.\n> >\n> > I think I was confused by the fact that we use some different\n> > approaches to calculate the amount of used memory. Parallel hash and\n> > tidbitmap use the allocated chunk size whereas hash_agg_check_limits()\n> > in nodeAgg.c uses MemoryContextMemAllocated(), which uses the\n> > allocated block size.\n>\n> That's good to know. 
The latter says:\n>\n> * After adding a new group to the hash table, check whether we need to enter\n> * spill mode. Allocations may happen without adding new groups (for instance,\n> * if the transition state size grows), so this check is imperfect.\n>\n> I'm willing to claim that vacuum can be imperfect also, given the tid store's properties: 1) on average much more efficient in used space, and 2) no longer bound by the 1GB limit.\n>\n> > > I'm not sure how this affects progress reporting, because it would be nice if it didn't report dead_tuple_bytes bigger than max_dead_tuple_bytes.\n> >\n> > Yes, the progress reporting could be confusable. Particularly, in\n> > shared tidstore cases, the dead_tuple_bytes could be much bigger than\n> > max_dead_tuple_bytes. Probably what we need might be functions for\n> > MemoryContext and dsa_area to get the amount of memory that has been\n> > allocated, by not tracking every chunk space. For example, the\n> > functions would be like what SlabStats() does; iterate over every\n> > block and calculates the total/free memory usage.\n>\n> I'm not sure we need to invent new infrastructure for this. Looking at v29 in vacuumlazy.c, the order of operations for memory accounting is:\n>\n> First, get the block-level space -- stop and vacuum indexes if we exceed the limit:\n>\n> /*\n> * Consider if we definitely have enough space to process TIDs on page\n> * already. 
If we are close to overrunning the available space for\n> * dead_items TIDs, pause and do a cycle of vacuuming before we tackle\n> * this page.\n> */\n> if (TidStoreIsFull(vacrel->dead_items)) --> which is basically \"if (TidStoreMemoryUsage(ts) > ts->control->max_bytes)\"\n>\n> Then, after pruning the current page, store the tids and then get the block-level space again:\n>\n> else if (prunestate.num_offsets > 0)\n> {\n> /* Save details of the LP_DEAD items from the page in dead_items */\n> TidStoreSetBlockOffsets(...);\n>\n> pgstat_progress_update_param(PROGRESS_VACUUM_DEAD_TUPLE_BYTES,\n> TidStoreMemoryUsage(dead_items));\n> }\n>\n> Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.\n>\n> Thoughts?\n\nIt looks to work but it still doesn't work in a case where a shared\ntidstore is created with a 64kB memory limit, right?\nTidStoreMemoryUsage() returns 1MB and TidStoreIsFull() returns true\nfrom the beginning.\n\nBTW I realized that since the caller can pass dsa_area to tidstore\n(and radix tree), if other data are allocated in the same DSA are,\nTidStoreMemoryUsage() (and RT_MEMORY_USAGE()) returns the memory usage\nthat includes not only itself but also other data. 
Probably it's\nbetter to comment that the passed dsa_area should be dedicated to a\ntidstore (or a radix tree).\n\n>\n> But now that I'm looking more closely at the details of memory accounting, I don't like that TidStoreMemoryUsage() is called twice per page pruned (see above). Maybe it wouldn't noticeably slow things down, but it's a bit sloppy. It seems like we should call it once per loop and save the result somewhere. If that's the right way to go, that possibly indicates that TidStoreIsFull() is not a useful interface, at least in this form.\n\nAgreed.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 6 Mar 2023 15:27:44 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 6, 2023 at 1:28 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> > Since the block-level measurement is likely overestimating quite a bit,\nI propose to simply reverse the order of the actions here, effectively\nreporting progress for the *last page* and not the current one: First\nupdate progress with the current memory usage, then add tids for this page.\nIf this allocated a new block, only a small bit of that will be written to.\nIf this block pushes it over the limit, we will detect that up at the top\nof the loop. It's kind of like our earlier attempts at a \"fudge factor\",\nbut simpler and less brittle. And, as far as OS pages we have actually\nwritten to, I think it'll effectively respect the memory limit, at least in\nthe local mem case. And the numbers will make sense.\n> >\n> > Thoughts?\n>\n> It looks to work but it still doesn't work in a case where a shared\n> tidstore is created with a 64kB memory limit, right?\n> TidStoreMemoryUsage() returns 1MB and TidStoreIsFull() returns true\n> from the beginning.\n\nI have two ideas:\n\n1. 
Make it optional to track chunk memory space by a template parameter. It\nmight be tiny compared to everything else that vacuum does. That would\nallow other users to avoid that overhead.\n2. When context block usage exceeds the limit (rare), make the additional\neffort to get the precise usage -- I'm not sure such a top-down facility\nexists, and I'm not feeling well enough today to study this further.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Mar 6, 2023 at 1:28 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.> >> > Thoughts?>> It looks to work but it still doesn't work in a case where a shared> tidstore is created with a 64kB memory limit, right?> TidStoreMemoryUsage() returns 1MB and TidStoreIsFull() returns true> from the beginning.I have two ideas:1. Make it optional to track chunk memory space by a template parameter. It might be tiny compared to everything else that vacuum does. That would allow other users to avoid that overhead.2. 
When context block usage exceeds the limit (rare), make the additional effort to get the precise usage -- I'm not sure such a top-down facility exists, and I'm not feeling well enough today to study this further.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 6 Mar 2023 23:00:52 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 7, 2023 at 1:01 AM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Mar 6, 2023 at 1:28 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.\n> > >\n> > > Thoughts?\n> >\n> > It looks to work but it still doesn't work in a case where a shared\n> > tidstore is created with a 64kB memory limit, right?\n> > TidStoreMemoryUsage() returns 1MB and TidStoreIsFull() returns true\n> > from the beginning.\n>\n> I have two ideas:\n>\n> 1. Make it optional to track chunk memory space by a template parameter. It might be tiny compared to everything else that vacuum does. That would allow other users to avoid that overhead.\n> 2. 
When context block usage exceeds the limit (rare), make the additional effort to get the precise usage -- I'm not sure such a top-down facility exists, and I'm not feeling well enough today to study this further.\n\nI prefer option (1) as it's straight forward. I mentioned a similar\nidea before[1]. RT_MEMORY_USAGE() is defined only when the macro is\ndefined. It might be worth checking if there is visible overhead of\ntracking chunk memory space. IIRC we've not evaluated it yet.\n\n[1] https://www.postgresql.org/message-id/CAD21AoDK3gbX-jVxT6Pfso1Na0Krzr8Q15498Aj6tmXgzMFksA%40mail.gmail.com\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 7 Mar 2023 10:24:32 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 7, 2023 at 8:25 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> > 1. Make it optional to track chunk memory space by a template\nparameter. It might be tiny compared to everything else that vacuum does.\nThat would allow other users to avoid that overhead.\n> > 2. When context block usage exceeds the limit (rare), make the\nadditional effort to get the precise usage -- I'm not sure such a top-down\nfacility exists, and I'm not feeling well enough today to study this\nfurther.\n>\n> I prefer option (1) as it's straight forward. I mentioned a similar\n> idea before[1]. RT_MEMORY_USAGE() is defined only when the macro is\n> defined. It might be worth checking if there is visible overhead of\n> tracking chunk memory space. IIRC we've not evaluated it yet.\n\nOk, let's try this -- I can test and profile later this week.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Tue, Mar 7, 2023 at 8:25 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > 1. Make it optional to track chunk memory space by a template parameter. 
It might be tiny compared to everything else that vacuum does. That would allow other users to avoid that overhead.> > 2. When context block usage exceeds the limit (rare), make the additional effort to get the precise usage -- I'm not sure such a top-down facility exists, and I'm not feeling well enough today to study this further.>> I prefer option (1) as it's straight forward. I mentioned a similar> idea before[1]. RT_MEMORY_USAGE() is defined only when the macro is> defined. It might be worth checking if there is visible overhead of> tracking chunk memory space. IIRC we've not evaluated it yet.Ok, let's try this -- I can test and profile later this week.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 8 Mar 2023 11:40:09 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 8, 2023 at 1:40 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n>\n\n> On Tue, Mar 7, 2023 at 8:25 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > 1. Make it optional to track chunk memory space by a template parameter. It might be tiny compared to everything else that vacuum does. That would allow other users to avoid that overhead.\n> > > 2. When context block usage exceeds the limit (rare), make the additional effort to get the precise usage -- I'm not sure such a top-down facility exists, and I'm not feeling well enough today to study this further.\n> >\n> > I prefer option (1) as it's straight forward. I mentioned a similar\n> > idea before[1]. RT_MEMORY_USAGE() is defined only when the macro is\n> > defined. It might be worth checking if there is visible overhead of\n> > tracking chunk memory space. IIRC we've not evaluated it yet.\n>\n> Ok, let's try this -- I can test and profile later this week.\n\nThanks!\n\nI've attached the new version patches. I merged improvements and fixes\nI did in the v29 patch. 
0007 through 0010 are updates from v29. The\nmain change made in v30 is to make the memory measurement and\nRT_MEMORY_USAGE() optional, which is done in 0007 patch. The 0008 and\n0009 patches are the updates for tidstore and the vacuum integration\npatches. Here are results of quick tests (an average of 3 executions):\n\nquery: select * from bench_load_random_int(10 * 1000 * 1000)\n\n* w/ RT_MEASURE_MEMORY_USAGE:\n mem_allocated | load_ms\n---------------+---------\n 1996512000 | 3305\n(1 row)\n\n* w/o RT_MEASURE_MEMORY_USAGE:\n mem_allocated | load_ms\n---------------+---------\n 0 | 3258\n(1 row)\n\nIt seems to be within a noise level but I agree to make it optional.\n\nApart from the memory measurement stuff, I've done another todo item\non my list; adding min max classes for node3 and node125. I've done\nthat in 0010 patch, and here is a quick test result:\n\nquery: select * from bench_load_random_int(10 * 1000 * 1000)\n\n* w/ 0000 patch\n mem_allocated | load_ms\n---------------+---------\n 1268630080 | 3275\n(1 row)\n\n* w/o 0000 patch\n mem_allocated | load_ms\n---------------+---------\n 1996512000 | 3214\n(1 row)\n\nThat's a good improvement on the memory usage, without a noticeable\nperformance overhead. FYI CLASS_3_MIN has 1 fanout and is 24 bytes in\nsize, and CLASS_125_MIN has 61 fanouts and is 768 bytes in size.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 9 Mar 2023 15:51:05 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 9, 2023 at 1:51 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> I've attached the new version patches. I merged improvements and fixes\n> I did in the v29 patch.\n\nI haven't yet had a chance to look at those closely, since I've had to\ndevote time to other commitments. 
I remember I wasn't particularly\nimpressed that v29-0008 mixed my requested name-casing changes with a bunch\nof other random things. Separating those out would be an obvious way to\nmake it easier for me to look at, whenever I can get back to this. I need\nto look at the iteration changes as well, in addition to testing memory\nmeasurement (thanks for the new results, they look encouraging).\n\n> Apart from the memory measurement stuff, I've done another todo item\n> on my list; adding min max classes for node3 and node125. I've done\n\nThis didn't help us move us closer to something committable the first time\nyou coded this without making sure it was a good idea. It's still not\nhelping and arguably makes it worse. To be fair, I did speak positively\nabout _considering_ additional size classes some months ago, but that has a\nvery obvious maintenance cost, something we can least afford right now.\n\nI'm frankly baffled you thought this was important enough to work on again,\nyet thought it was a waste of time to try to prove to ourselves that\nautovacuum in a realistic, non-deterministic workload gave the same answer\nas the current tid lookup. Even if we had gone that far, it doesn't seem\nlike a good idea to add non-essential code to critical paths right now.\n\nWe're rapidly running out of time, and we're at the point in the cycle\nwhere it's impossible to get meaningful review from anyone not already\nintimately familiar with the patch series. I only want to see progress on\naddressing possible (especially architectural) objections from the\ncommunity, because if they don't notice them now, they surely will after\ncommit. I have my own list of possible objections as well as bikeshedding\npoints, which I'll clean up and share next week. I plan to invite Andres to\nlook at that list and give his impressions, because it's a lot quicker than\nreading the patches. 
Based on that, I'll hopefully be able to decide\nwhether we have enough time to address any feedback and do remaining\npolishing in time for feature freeze.\n\nI'd suggest sharing your todo list in the meanwhile, it'd be good to\ndiscuss what's worth doing and what is not.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Mar 9, 2023 at 1:51 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> I've attached the new version patches. I merged improvements and fixes> I did in the v29 patch.I haven't yet had a chance to look at those closely, since I've had to devote time to other commitments. I remember I wasn't particularly impressed that v29-0008 mixed my requested name-casing changes with a bunch of other random things. Separating those out would be an obvious way to make it easier for me to look at, whenever I can get back to this. I need to look at the iteration changes as well, in addition to testing memory measurement (thanks for the new results, they look encouraging).> Apart from the memory measurement stuff, I've done another todo item> on my list; adding min max classes for node3 and node125. I've doneThis didn't help us move us closer to something committable the first time you coded this without making sure it was a good idea. It's still not helping and arguably makes it worse. To be fair, I did speak positively about _considering_ additional size classes some months ago, but that has a very obvious maintenance cost, something we can least afford right now.I'm frankly baffled you thought this was important enough to work on again, yet thought it was a waste of time to try to prove to ourselves that autovacuum in a realistic, non-deterministic workload gave the same answer as the current tid lookup. 
Even if we had gone that far, it doesn't seem like a good idea to add non-essential code to critical paths right now.We're rapidly running out of time, and we're at the point in the cycle where it's impossible to get meaningful review from anyone not already intimately familiar with the patch series. I only want to see progress on addressing possible (especially architectural) objections from the community, because if they don't notice them now, they surely will after commit. I have my own list of possible objections as well as bikeshedding points, which I'll clean up and share next week. I plan to invite Andres to look at that list and give his impressions, because it's a lot quicker than reading the patches. Based on that, I'll hopefully be able to decide whether we have enough time to address any feedback and do remaining polishing in time for feature freeze.I'd suggest sharing your todo list in the meanwhile, it'd be good to discuss what's worth doing and what is not.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 10 Mar 2023 13:42:33 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 10, 2023 at 3:42 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Mar 9, 2023 at 1:51 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I've attached the new version patches. I merged improvements and fixes\n> > I did in the v29 patch.\n>\n> I haven't yet had a chance to look at those closely, since I've had to devote time to other commitments. I remember I wasn't particularly impressed that v29-0008 mixed my requested name-casing changes with a bunch of other random things. Separating those out would be an obvious way to make it easier for me to look at, whenever I can get back to this. 
I need to look at the iteration changes as well, in addition to testing memory measurement (thanks for the new results, they look encouraging).\n\nOkay, I'll separate them again.\n\n>\n> > Apart from the memory measurement stuff, I've done another todo item\n> > on my list; adding min max classes for node3 and node125. I've done\n>\n> This didn't help us move us closer to something committable the first time you coded this without making sure it was a good idea. It's still not helping and arguably makes it worse. To be fair, I did speak positively about _considering_ additional size classes some months ago, but that has a very obvious maintenance cost, something we can least afford right now.\n>\n> I'm frankly baffled you thought this was important enough to work on again, yet thought it was a waste of time to try to prove to ourselves that autovacuum in a realistic, non-deterministic workload gave the same answer as the current tid lookup. Even if we had gone that far, it doesn't seem like a good idea to add non-essential code to critical paths right now.\n\nI didn't think that proving tidstore and the current tid lookup return\nthe same result was a waste of time. I've shared a patch to do that in\ntidstore before. I agreed not to add it to the tree but we can test\nthat using this patch. Actually I've done a test that ran pgbench\nworkload for a few days.\n\nIIUC it's still important to consider whether to have node1 since it\ncould be a good alternative for the path compression. The prototype\nalso implemented it. Of course we can leave it for future improvement.\nBut considering this item with the performance tests helps us to prove\nour decoupling approach is promising.\n\n> We're rapidly running out of time, and we're at the point in the cycle where it's impossible to get meaningful review from anyone not already intimately familiar with the patch series. 
I only want to see progress on addressing possible (especially architectural) objections from the community, because if they don't notice them now, they surely will after commit.\n\nRight, we've been making many design decisions. Some of them are\nagreed just between you and me and some are agreed with other hackers.\nThere are some irrevertible design decisions due to the remaining\ntime.\n\n> I have my own list of possible objections as well as bikeshedding points, which I'll clean up and share next week.\n\nThanks.\n\n> I plan to invite Andres to look at that list and give his impressions, because it's a lot quicker than reading the patches. Based on that, I'll hopefully be able to decide whether we have enough time to address any feedback and do remaining polishing in time for feature freeze.\n>\n> I'd suggest sharing your todo list in the meanwhile, it'd be good to discuss what's worth doing and what is not.\n\nApart from more rounds of reviews and tests, my todo items that need\ndiscussion and possibly implementation are:\n\n* The memory measurement in radix trees and the memory limit in\ntidstores. I've implemented it in v30-0007 through 0009 but we need to\nreview it. This is the highest priority for me.\n\n* Additional size classes. It's important for an alternative of path\ncompression as well as supporting our decoupling approach. Middle\npriority.\n\n* Node shrinking support. 
Low priority.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 10 Mar 2023 23:30:04 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 10, 2023 at 11:30 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Mar 10, 2023 at 3:42 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Thu, Mar 9, 2023 at 1:51 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > I've attached the new version patches. I merged improvements and fixes\n> > > I did in the v29 patch.\n> >\n> > I haven't yet had a chance to look at those closely, since I've had to devote time to other commitments. I remember I wasn't particularly impressed that v29-0008 mixed my requested name-casing changes with a bunch of other random things. Separating those out would be an obvious way to make it easier for me to look at, whenever I can get back to this. I need to look at the iteration changes as well, in addition to testing memory measurement (thanks for the new results, they look encouraging).\n>\n> Okay, I'll separate them again.\n\nAttached new patch series. 
In addition to separate them again, I've\nfixed a conflict with HEAD.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Sat, 11 Mar 2023 00:26:18 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 10, 2023 at 9:30 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Fri, Mar 10, 2023 at 3:42 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> > I'd suggest sharing your todo list in the meanwhile, it'd be good to\ndiscuss what's worth doing and what is not.\n>\n> Apart from more rounds of reviews and tests, my todo items that need\n> discussion and possibly implementation are:\n\nQuick thoughts on these:\n\n> * The memory measurement in radix trees and the memory limit in\n> tidstores. I've implemented it in v30-0007 through 0009 but we need to\n> review it. This is the highest priority for me.\n\nAgreed.\n\n> * Additional size classes. It's important for an alternative of path\n> compression as well as supporting our decoupling approach. Middle\n> priority.\n\nI'm going to push back a bit and claim this doesn't bring much gain, while\nit does have a complexity cost. The node1 from Andres's prototype is 32\nbytes in size, same as our node3, so it's roughly equivalent as a way to\nameliorate the lack of path compression. I say \"roughly\" because the loop\nin node3 is probably noticeably slower. A new size class will by definition\nstill use that loop.\n\nAbout a smaller node125-type class: I'm actually not even sure we need to\nhave any sub-max node bigger about 64 (node size 768 bytes). I'd just let\n65+ go to the max node -- there won't be many of them, at least in\nsynthetic workloads we've seen so far.\n\n> * Node shrinking support. Low priority.\n\nThis is an architectural wart that's been neglected since the tid store\ndoesn't perform deletion. 
We'll need it sometime. If we're not going to\nmake this work, why ship a deletion API at all?\n\nI took a look at this a couple weeks ago, and fixing it wouldn't be that\nhard. I even had an idea of how to detect when to shrink size class within\na node kind, while keeping the header at 5 bytes. I'd be willing to put\neffort into that, but to have a chance of succeeding, I'm unwilling to make\nit more difficult by adding more size classes at this point.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Fri, Mar 10, 2023 at 9:30 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Fri, Mar 10, 2023 at 3:42 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> > I'd suggest sharing your todo list in the meanwhile, it'd be good to discuss what's worth doing and what is not.>> Apart from more rounds of reviews and tests, my todo items that need> discussion and possibly implementation are:Quick thoughts on these:> * The memory measurement in radix trees and the memory limit in> tidstores. I've implemented it in v30-0007 through 0009 but we need to> review it. This is the highest priority for me.Agreed.> * Additional size classes. It's important for an alternative of path> compression as well as supporting our decoupling approach. Middle> priority.I'm going to push back a bit and claim this doesn't bring much gain, while it does have a complexity cost. The node1 from Andres's prototype is 32 bytes in size, same as our node3, so it's roughly equivalent as a way to ameliorate the lack of path compression. I say \"roughly\" because the loop in node3 is probably noticeably slower. A new size class will by definition still use that loop.About a smaller node125-type class: I'm actually not even sure we need to have any sub-max node bigger about 64 (node size 768 bytes). I'd just let 65+ go to the max node -- there won't be many of them, at least in synthetic workloads we've seen so far.> * Node shrinking support. 
Low priority.This is an architectural wart that's been neglected since the tid store doesn't perform deletion. We'll need it sometime. If we're not going to make this work, why ship a deletion API at all?I took a look at this a couple weeks ago, and fixing it wouldn't be that hard. I even had an idea of how to detect when to shrink size class within a node kind, while keeping the header at 5 bytes. I'd be willing to put effort into that, but to have a chance of succeeding, I'm unwilling to make it more difficult by adding more size classes at this point.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Sat, 11 Mar 2023 22:54:40 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sun, Mar 12, 2023 at 12:54 AM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Fri, Mar 10, 2023 at 9:30 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Mar 10, 2023 at 3:42 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n>\n> > > I'd suggest sharing your todo list in the meanwhile, it'd be good to discuss what's worth doing and what is not.\n> >\n> > Apart from more rounds of reviews and tests, my todo items that need\n> > discussion and possibly implementation are:\n>\n> Quick thoughts on these:\n>\n> > * The memory measurement in radix trees and the memory limit in\n> > tidstores. I've implemented it in v30-0007 through 0009 but we need to\n> > review it. This is the highest priority for me.\n>\n> Agreed.\n>\n> > * Additional size classes. It's important for an alternative of path\n> > compression as well as supporting our decoupling approach. Middle\n> > priority.\n>\n> I'm going to push back a bit and claim this doesn't bring much gain, while it does have a complexity cost. 
The node1 from Andres's prototype is 32 bytes in size, same as our node3, so it's roughly equivalent as a way to ameliorate the lack of path compression.\n\nBut does it mean that our node1 would help reduce the memory further\nsince since our base node type (i.e. RT_NODE) is smaller than the base\nnode type of Andres's prototype? The result I shared before showed\n1.2GB vs. 1.9GB.\n\n> I say \"roughly\" because the loop in node3 is probably noticeably slower. A new size class will by definition still use that loop.\n\nI've evaluated the performance of node1 but the result seems to show\nthe opposite. I used the test query:\n\nselect * from bench_search_random_nodes(100 * 1000 * 1000,\n'0xFF000000000000FF');\n\nWhich make the radix tree that has node1 like:\n\nmax_val = 18446744073709551615\nnum_keys = 65536\nheight = 7, n1 = 1536, n3 = 0, n15 = 0, n32 = 0, n61 = 0, n256 = 257\n\nAll internal nodes except for the root node are node1. The radix tree\nthat doesn't have node1 is:\n\nmax_val = 18446744073709551615\nnum_keys = 65536\nheight = 7, n3 = 1536, n15 = 0, n32 = 0, n125 = 0, n256 = 257\n\nHere is the result:\n\n* w/ node1\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 573448 | 1848 | 1707\n(1 row)\n\n* w/o node1\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 598024 | 2014 | 1825\n(1 row)\n\nAm I missing something?\n\n>\n> About a smaller node125-type class: I'm actually not even sure we need to have any sub-max node bigger about 64 (node size 768 bytes). I'd just let 65+ go to the max node -- there won't be many of them, at least in synthetic workloads we've seen so far.\n\nMakes sense to me.\n\n>\n> > * Node shrinking support. Low priority.\n>\n> This is an architectural wart that's been neglected since the tid store doesn't perform deletion. We'll need it sometime. 
If we're not going to make this work, why ship a deletion API at all?\n>\n> I took a look at this a couple weeks ago, and fixing it wouldn't be that hard. I even had an idea of how to detect when to shrink size class within a node kind, while keeping the header at 5 bytes. I'd be willing to put effort into that, but to have a chance of succeeding, I'm unwilling to make it more difficult by adding more size classes at this point.\n\nI think that the deletion (and locking support) doesn't have use cases\nin the core (i.e. tidstore) but is implemented so that external\nextensions can use it. There might not be such extensions. Given the\nlack of use cases in the core (and the rest of time), I think it's\nokay even if the implementation of such API is minimal and not\noptimized enough. For instance, the implementation of dshash.c is\nminimalist, and doesn't have resizing. We can improve them in the\nfuture if extensions or other core features want.\n\nPersonally I think we should focus on addressing feedback that we\nwould get and improving the existing use cases for the rest of time.\nThat's why considering min-max size class has a higher priority than\nthe node shrinking support in my todo list.\n\nFYI, I've run TPC-C workload over the weekend, and didn't get any\nfailures of the assertion proving tidstore and the current tid lookup\nreturn the same result.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 13 Mar 2023 10:41:16 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 13, 2023 at 8:41 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Sun, Mar 12, 2023 at 12:54 AM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Fri, Mar 10, 2023 at 9:30 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> > > * Additional size classes. 
It's important for an alternative of path\n> > > compression as well as supporting our decoupling approach. Middle\n> > > priority.\n> >\n> > I'm going to push back a bit and claim this doesn't bring much gain,\nwhile it does have a complexity cost. The node1 from Andres's prototype is\n32 bytes in size, same as our node3, so it's roughly equivalent as a way to\nameliorate the lack of path compression.\n>\n> But does it mean that our node1 would help reduce the memory further\n> since since our base node type (i.e. RT_NODE) is smaller than the base\n> node type of Andres's prototype? The result I shared before showed\n> 1.2GB vs. 1.9GB.\n\nThe benefit is found in a synthetic benchmark with random integers. I\nhighly doubt that anyone would be willing to force us to keep\nbinary-searching the 1GB array for one more cycle on account of not adding\na size class here. I'll repeat myself and say that there are also\nmaintenance costs.\n\nIn contrast, I'm fairly certain that our attempts thus far at memory\naccounting/limiting are not quite up to par, and lacking enough to\njeopardize the feature. We're already discussing that, so I'll say no more.\n\n> > I say \"roughly\" because the loop in node3 is probably noticeably\nslower. A new size class will by definition still use that loop.\n>\n> I've evaluated the performance of node1 but the result seems to show\n> the opposite.\n\nAs an aside, I meant the loop in our node3 might make your node1 slower\nthan the prototype's node1, which was coded for 1 member only.\n\n> > > * Node shrinking support. Low priority.\n> >\n> > This is an architectural wart that's been neglected since the tid store\ndoesn't perform deletion. We'll need it sometime. If we're not going to\nmake this work, why ship a deletion API at all?\n> >\n> > I took a look at this a couple weeks ago, and fixing it wouldn't be\nthat hard. I even had an idea of how to detect when to shrink size class\nwithin a node kind, while keeping the header at 5 bytes. 
I'd be willing to\nput effort into that, but to have a chance of succeeding, I'm unwilling to\nmake it more difficult by adding more size classes at this point.\n>\n> I think that the deletion (and locking support) doesn't have use cases\n> in the core (i.e. tidstore) but is implemented so that external\n> extensions can use it.\n\nI think these cases are a bit different: Doing anything with a data\nstructure stored in shared memory without a synchronization scheme is\ncompletely unthinkable and insane. I'm not yet sure if\ndeleting-without-shrinking is a showstopper, or if it's preferable in v16\nthan no deletion at all.\n\nAnything we don't implement now is a limit on future use cases, and thus a\ncause for objection. On the other hand, anything we implement also\nrepresents more stuff that will have to be rewritten for high-concurrency.\n\n> FYI, I've run TPC-C workload over the weekend, and didn't get any\n> failures of the assertion proving tidstore and the current tid lookup\n> return the same result.\n\nGreat!\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Mar 13, 2023 at 8:41 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Sun, Mar 12, 2023 at 12:54 AM John Naylor> <john.naylor@enterprisedb.com> wrote:> >> > On Fri, Mar 10, 2023 at 9:30 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > > * Additional size classes. It's important for an alternative of path> > > compression as well as supporting our decoupling approach. Middle> > > priority.> >> > I'm going to push back a bit and claim this doesn't bring much gain, while it does have a complexity cost. The node1 from Andres's prototype is 32 bytes in size, same as our node3, so it's roughly equivalent as a way to ameliorate the lack of path compression.>> But does it mean that our node1 would help reduce the memory further> since since our base node type (i.e. RT_NODE) is smaller than the base> node type of Andres's prototype? The result I shared before showed> 1.2GB vs. 
1.9GB.The benefit is found in a synthetic benchmark with random integers. I highly doubt that anyone would be willing to force us to keep binary-searching the 1GB array for one more cycle on account of not adding a size class here. I'll repeat myself and say that there are also maintenance costs.In contrast, I'm fairly certain that our attempts thus far at memory accounting/limiting are not quite up to par, and lacking enough to jeopardize the feature. We're already discussing that, so I'll say no more.> > I say \"roughly\" because the loop in node3 is probably noticeably slower. A new size class will by definition still use that loop.>> I've evaluated the performance of node1 but the result seems to show> the opposite.As an aside, I meant the loop in our node3 might make your node1 slower than the prototype's node1, which was coded for 1 member only. > > > * Node shrinking support. Low priority.> >> > This is an architectural wart that's been neglected since the tid store doesn't perform deletion. We'll need it sometime. If we're not going to make this work, why ship a deletion API at all?> >> > I took a look at this a couple weeks ago, and fixing it wouldn't be that hard. I even had an idea of how to detect when to shrink size class within a node kind, while keeping the header at 5 bytes. I'd be willing to put effort into that, but to have a chance of succeeding, I'm unwilling to make it more difficult by adding more size classes at this point.>> I think that the deletion (and locking support) doesn't have use cases> in the core (i.e. tidstore) but is implemented so that external> extensions can use it. I think these cases are a bit different: Doing anything with a data structure stored in shared memory without a synchronization scheme is completely unthinkable and insane. 
I'm not yet sure if deleting-without-shrinking is a showstopper, or if it's preferable in v16 than no deletion at all.Anything we don't implement now is a limit on future use cases, and thus a cause for objection. On the other hand, anything we implement also represents more stuff that will have to be rewritten for high-concurrency.> FYI, I've run TPC-C workload over the weekend, and didn't get any> failures of the assertion proving tidstore and the current tid lookup> return the same result.Great!--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 13 Mar 2023 20:28:09 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 13, 2023 at 10:28 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Mar 13, 2023 at 8:41 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Sun, Mar 12, 2023 at 12:54 AM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > On Fri, Mar 10, 2023 at 9:30 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > > * Additional size classes. It's important for an alternative of path\n> > > > compression as well as supporting our decoupling approach. Middle\n> > > > priority.\n> > >\n> > > I'm going to push back a bit and claim this doesn't bring much gain, while it does have a complexity cost. The node1 from Andres's prototype is 32 bytes in size, same as our node3, so it's roughly equivalent as a way to ameliorate the lack of path compression.\n> >\n> > But does it mean that our node1 would help reduce the memory further\n> > since since our base node type (i.e. RT_NODE) is smaller than the base\n> > node type of Andres's prototype? The result I shared before showed\n> > 1.2GB vs. 1.9GB.\n>\n> The benefit is found in a synthetic benchmark with random integers. 
I highly doubt that anyone would be willing to force us to keep binary-searching the 1GB array for one more cycle on account of not adding a size class here. I'll repeat myself and say that there are also maintenance costs.\n>\n> In contrast, I'm fairly certain that our attempts thus far at memory accounting/limiting are not quite up to par, and lacking enough to jeopardize the feature. We're already discussing that, so I'll say no more.\n\nI agree that memory accounting/limiting stuff is the highest priority.\nSo what kinds of size classes do you think we need? node3, 15, 32, 61\nand 256?\n\n>\n> > > I say \"roughly\" because the loop in node3 is probably noticeably slower. A new size class will by definition still use that loop.\n> >\n> > I've evaluated the performance of node1 but the result seems to show\n> > the opposite.\n>\n> As an aside, I meant the loop in our node3 might make your node1 slower than the prototype's node1, which was coded for 1 member only.\n\nAgreed.\n\n>\n> > > > * Node shrinking support. Low priority.\n> > >\n> > > This is an architectural wart that's been neglected since the tid store doesn't perform deletion. We'll need it sometime. If we're not going to make this work, why ship a deletion API at all?\n> > >\n> > > I took a look at this a couple weeks ago, and fixing it wouldn't be that hard. I even had an idea of how to detect when to shrink size class within a node kind, while keeping the header at 5 bytes. I'd be willing to put effort into that, but to have a chance of succeeding, I'm unwilling to make it more difficult by adding more size classes at this point.\n> >\n> > I think that the deletion (and locking support) doesn't have use cases\n> > in the core (i.e. 
tidstore) but is implemented so that external\n> > extensions can use it.\n>\n> I think these cases are a bit different: Doing anything with a data structure stored in shared memory without a synchronization scheme is completely unthinkable and insane.\n\nRight.\n\n> I'm not yet sure if deleting-without-shrinking is a showstopper, or if it's preferable in v16 than no deletion at all.\n>\n> Anything we don't implement now is a limit on future use cases, and thus a cause for objection. On the other hand, anything we implement also represents more stuff that will have to be rewritten for high-concurrency.\n\nOkay. Given that adding shrinking support also requires maintenance\ncosts (and probably new test cases?) and there are no use cases in the\ncore, I'm not sure it's worth supporting it at this stage. So I prefer\neither shipping the deletion API as it is and removing the deletion\nAPI. I think that it's a discussion point that we'd like to hear\nfeedback from other hackers.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 13 Mar 2023 23:55:29 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I wrote:\n\n> > > Since the block-level measurement is likely overestimating quite a\nbit, I propose to simply reverse the order of the actions here, effectively\nreporting progress for the *last page* and not the current one: First\nupdate progress with the current memory usage, then add tids for this page.\nIf this allocated a new block, only a small bit of that will be written to.\nIf this block pushes it over the limit, we will detect that up at the top\nof the loop. It's kind of like our earlier attempts at a \"fudge factor\",\nbut simpler and less brittle. 
And, as far as OS pages we have actually\nwritten to, I think it'll effectively respect the memory limit, at least in\nthe local mem case. And the numbers will make sense.\n> > >\n> > > Thoughts?\n> >\n> > It looks to work but it still doesn't work in a case where a shared\n> > tidstore is created with a 64kB memory limit, right?\n> > TidStoreMemoryUsage() returns 1MB and TidStoreIsFull() returns true\n> > from the beginning.\n>\n> I have two ideas:\n>\n> 1. Make it optional to track chunk memory space by a template parameter.\nIt might be tiny compared to everything else that vacuum does. That would\nallow other users to avoid that overhead.\n> 2. When context block usage exceeds the limit (rare), make the additional\neffort to get the precise usage -- I'm not sure such a top-down facility\nexists, and I'm not feeling well enough today to study this further.\n\nSince then, Masahiko incorporated #1 into v31, and that's what I'm looking\nat now. Unfortunately, If I had spent five minutes reminding myself what\nthe original objections were to this approach, I could have saved us some\neffort. Back in July (!), Andres raised two points: GetMemoryChunkSpace()\nis slow [1], and fragmentation [2] (leading to underestimation).\n\nIn v31, in the local case at least, the underestimation is actually worse\nthan tracking chunk space, since it ignores chunk header and alignment.\nI'm not sure about the DSA case. This doesn't seem great.\n\nIt shouldn't be a surprise why a simple increment of raw allocation size is\ncomparable in speed -- GetMemoryChunkSpace() calls the right function\nthrough a pointer, which is slower. If we were willing to underestimate for\nthe sake of speed, that takes away the reason for making memory tracking\noptional.\n\nFurther, if the option is not specified, in v31 there is no way to get the\nmemory use at all, which seems odd. 
Surely the caller should be able to ask\nthe context/area, if it wants to.\n\nI still like my idea at the top of the page -- at least for vacuum and\nm_w_m. It's still not completely clear if it's right but I've got nothing\nbetter. It also ignores the work_mem issue, but I've given up anticipating\nall future cases at the moment.\n\nI'll put this item and a couple other things together in a separate email\ntomorrow.\n\n[1]\nhttps://www.postgresql.org/message-id/20220704211822.kfxtzpcdmslzm2dy%40awork3.anarazel.de\n[2]\nhttps://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.de\n\n-- \nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nI wrote:> > > Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.> > >> > > Thoughts?> >> > It looks to work but it still doesn't work in a case where a shared> > tidstore is created with a 64kB memory limit, right?> > TidStoreMemoryUsage() returns 1MB and TidStoreIsFull() returns true> > from the beginning.>> I have two ideas:>> 1. Make it optional to track chunk memory space by a template parameter. It might be tiny compared to everything else that vacuum does. That would allow other users to avoid that overhead.> 2. 
When context block usage exceeds the limit (rare), make the additional effort to get the precise usage -- I'm not sure such a top-down facility exists, and I'm not feeling well enough today to study this further.Since then, Masahiko incorporated #1 into v31, and that's what I'm looking at now. Unfortunately, If I had spent five minutes reminding myself what the original objections were to this approach, I could have saved us some effort. Back in July (!), Andres raised two points: GetMemoryChunkSpace() is slow [1], and fragmentation [2] (leading to underestimation).In v31, in the local case at least, the underestimation is actually worse than tracking chunk space, since it ignores chunk header and alignment.  I'm not sure about the DSA case. This doesn't seem great.It shouldn't be a surprise why a simple increment of raw allocation size is comparable in speed -- GetMemoryChunkSpace() calls the right function through a pointer, which is slower. If we were willing to underestimate for the sake of speed, that takes away the reason for making memory tracking optional.Further, if the option is not specified, in v31 there is no way to get the memory use at all, which seems odd. Surely the caller should be able to ask the context/area, if it wants to.I still like my idea at the top of the page -- at least for vacuum and m_w_m. It's still not completely clear if it's right but I've got nothing better. 
It also ignores the work_mem issue, but I've given up anticipating all future cases at the moment.I'll put this item and a couple other things together in a separate email tomorrow.[1] https://www.postgresql.org/message-id/20220704211822.kfxtzpcdmslzm2dy%40awork3.anarazel.de[2] https://www.postgresql.org/message-id/20220704220038.at2ane5xkymzzssb%40awork3.anarazel.de-- John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 14 Mar 2023 18:27:37 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 14, 2023 at 8:27 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> I wrote:\n>\n> > > > Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.\n> > > >\n> > > > Thoughts?\n> > >\n> > > It looks to work but it still doesn't work in a case where a shared\n> > > tidstore is created with a 64kB memory limit, right?\n> > > TidStoreMemoryUsage() returns 1MB and TidStoreIsFull() returns true\n> > > from the beginning.\n> >\n> > I have two ideas:\n> >\n> > 1. Make it optional to track chunk memory space by a template parameter. It might be tiny compared to everything else that vacuum does. That would allow other users to avoid that overhead.\n> > 2. 
When context block usage exceeds the limit (rare), make the additional effort to get the precise usage -- I'm not sure such a top-down facility exists, and I'm not feeling well enough today to study this further.\n>\n> Since then, Masahiko incorporated #1 into v31, and that's what I'm looking at now. Unfortunately, If I had spent five minutes reminding myself what the original objections were to this approach, I could have saved us some effort. Back in July (!), Andres raised two points: GetMemoryChunkSpace() is slow [1], and fragmentation [2] (leading to underestimation).\n>\n> In v31, in the local case at least, the underestimation is actually worse than tracking chunk space, since it ignores chunk header and alignment. I'm not sure about the DSA case. This doesn't seem great.\n\nRight.\n\n>\n> It shouldn't be a surprise why a simple increment of raw allocation size is comparable in speed -- GetMemoryChunkSpace() calls the right function through a pointer, which is slower. If we were willing to underestimate for the sake of speed, that takes away the reason for making memory tracking optional.\n>\n> Further, if the option is not specified, in v31 there is no way to get the memory use at all, which seems odd. Surely the caller should be able to ask the context/area, if it wants to.\n\nThere are precedents that don't provide a way to return memory usage,\nsuch as simplehash.h and dshash.c.\n\n>\n> I still like my idea at the top of the page -- at least for vacuum and m_w_m. It's still not completely clear if it's right but I've got nothing better. It also ignores the work_mem issue, but I've given up anticipating all future cases at the moment.\n>\n\nWhat does it mean by \"the precise usage\" in your idea? Quoting from\nthe email you referred to, Andres said:\n\n---\nOne thing I was wondering about is trying to choose node types in\nroughly-power-of-two struct sizes. 
It's pretty easy to end up with significant\nfragmentation in the slabs right now when inserting as you go, because some of\nthe smaller node types will be freed but not enough to actually free blocks of\nmemory. If we instead have ~power-of-two sizes we could just use a single slab\nof the max size, and carve out the smaller node types out of that largest\nallocation.\n\nBtw, that fragmentation is another reason why I think it's better to track\nmemory usage via memory contexts, rather than doing so based on\nGetMemoryChunkSpace().\n---\n\nIIUC he suggested measuring memory usage in block-level in order to\ncount blocks that are not actually freed but some of its chunks are\nfreed. That's why we used MemoryContextMemAllocated(). On the other\nhand, recently you pointed out[1]:\n\n---\nI think we're trying to solve the wrong problem here. I need to study\nthis more, but it seems that code that needs to stay within a memory\nlimit only needs to track what's been allocated in chunks within a\nblock, since writing there is what invokes a page fault.\n---\n\nIIUC you suggested measuring memory usage by tracking how much memory\nchunks are allocated within a block. 
If your idea at the top of the\npage follows this method, it still doesn't deal with the point Andres\nmentioned.\n\n> I'll put this item and a couple other things together in a separate email tomorrow.\n\nThanks!\n\nRegards,\n\n[1] https://www.postgresql.org/message-id/CAFBsxsEnzivaJ13iCGdDoUMsXJVGOaahuBe_y%3Dq6ow%3DLTzyDvA%40mail.gmail.com\n\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 15 Mar 2023 11:32:06 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 15, 2023 at 9:32 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Tue, Mar 14, 2023 at 8:27 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > I wrote:\n> >\n> > > > > Since the block-level measurement is likely overestimating quite\na bit, I propose to simply reverse the order of the actions here,\neffectively reporting progress for the *last page* and not the current one:\nFirst update progress with the current memory usage, then add tids for this\npage. If this allocated a new block, only a small bit of that will be\nwritten to. If this block pushes it over the limit, we will detect that up\nat the top of the loop. It's kind of like our earlier attempts at a \"fudge\nfactor\", but simpler and less brittle. And, as far as OS pages we have\nactually written to, I think it'll effectively respect the memory limit, at\nleast in the local mem case. And the numbers will make sense.\n\n> > I still like my idea at the top of the page -- at least for vacuum and\nm_w_m. It's still not completely clear if it's right but I've got nothing\nbetter. It also ignores the work_mem issue, but I've given up anticipating\nall future cases at the moment.\n\n> IIUC you suggested measuring memory usage by tracking how much memory\n> chunks are allocated within a block. 
If your idea at the top of the\n> page follows this method, it still doesn't deal with the point Andres\n> mentioned.\n\nRight, but that idea was orthogonal to how we measure memory use, and in\nfact mentions blocks specifically. The re-ordering was just to make sure\nthat progress reporting didn't show current-use > max-use.\n\nHowever, the big question remains DSA, since a new segment can be as large\nas the entire previous set of allocations. It seems it just wasn't designed\nfor things where memory growth is unpredictable.\n\nI'm starting to wonder if we need to give DSA a bit more info at the start.\nImagine a \"soft\" limit given to the DSA area when it is initialized. If the\ntotal segment usage exceeds this, it stops doubling and instead new\nsegments get smaller. Modifying an example we used for the fudge-factor\nidea some time ago:\n\nm_w_m = 1GB, so calculate the soft limit to be 512MB and pass it to the DSA\narea.\n\n2*(1+2+4+8+16+32+64+128) + 256 = 766MB (74.8% of 1GB) -> hit soft limit, so\n\"stairstep down\" the new segment sizes:\n\n766 + 2*(128) + 64 = 1086MB -> stop\n\nThat's just an undeveloped idea, however, so likely v17 development, even\nassuming it's not a bad idea (could be).\n\nAnd sadly, unless we find some other, simpler answer soon for tracking and\nlimiting shared memory, the tid store is looking like v17 material.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Wed, Mar 15, 2023 at 9:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Tue, Mar 14, 2023 at 8:27 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> >> > I wrote:> >> > > > > Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. 
If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.> > I still like my idea at the top of the page -- at least for vacuum and m_w_m. It's still not completely clear if it's right but I've got nothing better. It also ignores the work_mem issue, but I've given up anticipating all future cases at the moment.> IIUC you suggested measuring memory usage by tracking how much memory> chunks are allocated within a block. If your idea at the top of the> page follows this method, it still doesn't deal with the point Andres> mentioned.Right, but that idea was orthogonal to how we measure memory use, and in fact mentions blocks specifically. The re-ordering was just to make sure that progress reporting didn't show current-use > max-use.However, the big question remains DSA, since a new segment can be as large as the entire previous set of allocations. It seems it just wasn't designed for things where memory growth is unpredictable.I'm starting to wonder if we need to give DSA a bit more info at the start. Imagine a \"soft\" limit given to the DSA area when it is initialized. If the total segment usage exceeds this, it stops doubling and instead new segments get smaller. 
Modifying an example we used for the fudge-factor idea some time ago:m_w_m = 1GB, so calculate the soft limit to be 512MB and pass it to the DSA area.2*(1+2+4+8+16+32+64+128) + 256 = 766MB (74.8% of 1GB) -> hit soft limit, so \"stairstep down\" the new segment sizes:766 + 2*(128) + 64 = 1086MB -> stopThat's just an undeveloped idea, however, so likely v17 development, even assuming it's not a bad idea (could be).And sadly, unless we find some other, simpler answer soon for tracking and limiting shared memory, the tid store is looking like v17 material.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 17 Mar 2023 14:02:53 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 17, 2023 at 4:03 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Wed, Mar 15, 2023 at 9:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Tue, Mar 14, 2023 at 8:27 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > I wrote:\n> > >\n> > > > > > Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.\n>\n> > > I still like my idea at the top of the page -- at least for vacuum and m_w_m. 
It's still not completely clear if it's right but I've got nothing better. It also ignores the work_mem issue, but I've given up anticipating all future cases at the moment.\n>\n> > IIUC you suggested measuring memory usage by tracking how much memory\n> > chunks are allocated within a block. If your idea at the top of the\n> > page follows this method, it still doesn't deal with the point Andres\n> > mentioned.\n>\n> Right, but that idea was orthogonal to how we measure memory use, and in fact mentions blocks specifically. The re-ordering was just to make sure that progress reporting didn't show current-use > max-use.\n\nRight. I still like your re-ordering idea. It's true that the most\narea of the last allocated block before heap scanning stops is not\nactually used yet. I'm guessing we can just check if the context\nmemory has gone over the limit. But I'm concerned it might not work\nwell in systems where overcommit memory is disabled.\n\n>\n> However, the big question remains DSA, since a new segment can be as large as the entire previous set of allocations. It seems it just wasn't designed for things where memory growth is unpredictable.\n>\n> I'm starting to wonder if we need to give DSA a bit more info at the start. Imagine a \"soft\" limit given to the DSA area when it is initialized. If the total segment usage exceeds this, it stops doubling and instead new segments get smaller. Modifying an example we used for the fudge-factor idea some time ago:\n>\n> m_w_m = 1GB, so calculate the soft limit to be 512MB and pass it to the DSA area.\n>\n> 2*(1+2+4+8+16+32+64+128) + 256 = 766MB (74.8% of 1GB) -> hit soft limit, so \"stairstep down\" the new segment sizes:\n>\n> 766 + 2*(128) + 64 = 1086MB -> stop\n>\n> That's just an undeveloped idea, however, so likely v17 development, even assuming it's not a bad idea (could be).\n\nThis is an interesting idea. 
But I'm concerned we don't have enough\ntime to get confident with adding this new concept to DSA.\n\n>\n> And sadly, unless we find some other, simpler answer soon for tracking and limiting shared memory, the tid store is looking like v17 material.\n\nAnother problem we need to deal with is the supported minimum memory\nin shared tidstore cases. Since the initial DSA segment size is 1MB,\nmemory usage of a shared tidstore will start from 1MB+. This is higher\nthan the minimum values of both work_mem and maintenance_work_mem,\n64kB and 1MB respectively. Increasing the minimum m_w_m to 2MB seems\nto be acceptable in the community but not for work_mem. One idea is to\ndeny the memory limit less than 2MB so it won't work with small m_w_m\nsettings. While it might be an acceptable restriction at this stage\n(where there is no use case of using tidstore with work_mem in the\ncore) but it will be a blocker for the future adoptions such as\nunifying with tidbitmap.c. Another idea is that the process can\nspecify the initial segment size at dsa_create() so that DSA can start\nwith a smaller segment, say 32kB. That way, a tidstore with a 32kB\nlimit gets full once it allocates the next DSA segment, 32kB. . But a\ndownside of this idea is to increase the number of segments behind\nDSA. Assuming it's a relatively rare case where we use such a low\nwork_mem, it might be acceptable. 
FYI, the total number of DSM\nsegments available on the system is calculated by:\n\n#define PG_DYNSHMEM_FIXED_SLOTS 64\n#define PG_DYNSHMEM_SLOTS_PER_BACKEND 5\n\nmaxitems = PG_DYNSHMEM_FIXED_SLOTS\n + PG_DYNSHMEM_SLOTS_PER_BACKEND * MaxBackends;\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 17 Mar 2023 16:49:33 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 17, 2023 at 4:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Mar 17, 2023 at 4:03 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Wed, Mar 15, 2023 at 9:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Tue, Mar 14, 2023 at 8:27 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> > > >\n> > > > I wrote:\n> > > >\n> > > > > > > Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.\n> >\n> > > > I still like my idea at the top of the page -- at least for vacuum and m_w_m. It's still not completely clear if it's right but I've got nothing better. 
It also ignores the work_mem issue, but I've given up anticipating all future cases at the moment.\n> >\n> > > IIUC you suggested measuring memory usage by tracking how much memory\n> > > chunks are allocated within a block. If your idea at the top of the\n> > > page follows this method, it still doesn't deal with the point Andres\n> > > mentioned.\n> >\n> > Right, but that idea was orthogonal to how we measure memory use, and in fact mentions blocks specifically. The re-ordering was just to make sure that progress reporting didn't show current-use > max-use.\n>\n> Right. I still like your re-ordering idea. It's true that the most\n> area of the last allocated block before heap scanning stops is not\n> actually used yet. I'm guessing we can just check if the context\n> memory has gone over the limit. But I'm concerned it might not work\n> well in systems where overcommit memory is disabled.\n>\n> >\n> > However, the big question remains DSA, since a new segment can be as large as the entire previous set of allocations. It seems it just wasn't designed for things where memory growth is unpredictable.\n\naset.c also has a similar characteristic; allocates an 8K block upon\nthe first allocation in a context, and doubles that size for each\nsuccessive block request. But we can specify the initial block size\nand max blocksize. This made me think of another idea to specify both\nto DSA and both values are calculated based on m_w_m. For example, we\ncan create a DSA in parallel_vacuum_init() as follows:\n\ninitial block size = min(m_w_m / 4, 1MB)\nmax block size = max(m_w_m / 8, 8MB)\n\nIn most cases, we can start with a 1MB initial segment, the same as\nbefore. For small memory cases, say 1MB, we start with a 256KB initial\nsegment and heap scanning stops after DSA allocated 1.5MB (= 256kB +\n256kB + 512kB + 512kB). For larger memory, we can have heap scan stop\nafter DSA allocates 1.25 times more memory than m_w_m. 
For example, if\nm_w_m = 1GB, the both initial and maximum segment sizes are 1MB and\n128MB respectively, and then DSA allocates the segments as follows\nuntil heap scanning stops:\n\n2 * (1 + 2 + 4 + 8 + 16 + 32 + 64 + 128) + (128 * 5) = 1150MB\n\ndsa_allocate() will be extended to have the initial and maximum block\nsizes like AllocSetContextCreate().\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 20 Mar 2023 14:24:38 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 20, 2023 at 12:25 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Fri, Mar 17, 2023 at 4:49 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> >\n> > On Fri, Mar 17, 2023 at 4:03 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > On Wed, Mar 15, 2023 at 9:32 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n> > > >\n> > > > On Tue, Mar 14, 2023 at 8:27 PM John Naylor\n> > > > <john.naylor@enterprisedb.com> wrote:\n> > > > >\n> > > > > I wrote:\n> > > > >\n> > > > > > > > Since the block-level measurement is likely overestimating\nquite a bit, I propose to simply reverse the order of the actions here,\neffectively reporting progress for the *last page* and not the current one:\nFirst update progress with the current memory usage, then add tids for this\npage. If this allocated a new block, only a small bit of that will be\nwritten to. If this block pushes it over the limit, we will detect that up\nat the top of the loop. It's kind of like our earlier attempts at a \"fudge\nfactor\", but simpler and less brittle. And, as far as OS pages we have\nactually written to, I think it'll effectively respect the memory limit, at\nleast in the local mem case. 
And the numbers will make sense.\n> > >\n> > > > > I still like my idea at the top of the page -- at least for\nvacuum and m_w_m. It's still not completely clear if it's right but I've\ngot nothing better. It also ignores the work_mem issue, but I've given up\nanticipating all future cases at the moment.\n> > >\n> > > > IIUC you suggested measuring memory usage by tracking how much\nmemory\n> > > > chunks are allocated within a block. If your idea at the top of the\n> > > > page follows this method, it still doesn't deal with the point\nAndres\n> > > > mentioned.\n> > >\n> > > Right, but that idea was orthogonal to how we measure memory use, and\nin fact mentions blocks specifically. The re-ordering was just to make sure\nthat progress reporting didn't show current-use > max-use.\n> >\n> > Right. I still like your re-ordering idea. It's true that the most\n> > area of the last allocated block before heap scanning stops is not\n> > actually used yet. I'm guessing we can just check if the context\n> > memory has gone over the limit. But I'm concerned it might not work\n> > well in systems where overcommit memory is disabled.\n> >\n> > >\n> > > However, the big question remains DSA, since a new segment can be as\nlarge as the entire previous set of allocations. It seems it just wasn't\ndesigned for things where memory growth is unpredictable.\n>\n> aset.c also has a similar characteristic; allocates an 8K block upon\n> the first allocation in a context, and doubles that size for each\n> successive block request. But we can specify the initial block size\n> and max blocksize. This made me think of another idea to specify both\n> to DSA and both values are calculated based on m_w_m. For example, we\n\nThat's an interesting idea, and the analogous behavior to aset could be a\ngood thing for readability and maintainability. 
Worth seeing if it's\nworkable.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Mar 20, 2023 at 12:25 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Fri, Mar 17, 2023 at 4:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> >> > On Fri, Mar 17, 2023 at 4:03 PM John Naylor> > <john.naylor@enterprisedb.com> wrote:> > >> > > On Wed, Mar 15, 2023 at 9:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > > >> > > > On Tue, Mar 14, 2023 at 8:27 PM John Naylor> > > > <john.naylor@enterprisedb.com> wrote:> > > > >> > > > > I wrote:> > > > >> > > > > > > > Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.> > >> > > > > I still like my idea at the top of the page -- at least for vacuum and m_w_m. It's still not completely clear if it's right but I've got nothing better. It also ignores the work_mem issue, but I've given up anticipating all future cases at the moment.> > >> > > > IIUC you suggested measuring memory usage by tracking how much memory> > > > chunks are allocated within a block. If your idea at the top of the> > > > page follows this method, it still doesn't deal with the point Andres> > > > mentioned.> > >> > > Right, but that idea was orthogonal to how we measure memory use, and in fact mentions blocks specifically. 
The re-ordering was just to make sure that progress reporting didn't show current-use > max-use.> >> > Right. I still like your re-ordering idea. It's true that the most> > area of the last allocated block before heap scanning stops is not> > actually used yet. I'm guessing we can just check if the context> > memory has gone over the limit. But I'm concerned it might not work> > well in systems where overcommit memory is disabled.> >> > >> > > However, the big question remains DSA, since a new segment can be as large as the entire previous set of allocations. It seems it just wasn't designed for things where memory growth is unpredictable.>> aset.c also has a similar characteristic; allocates an 8K block upon> the first allocation in a context, and doubles that size for each> successive block request. But we can specify the initial block size> and max blocksize. This made me think of another idea to specify both> to DSA and both values are calculated based on m_w_m. For example, weThat's an interesting idea, and the analogous behavior to aset could be a good thing for readability and maintainability. 
Worth seeing if it's workable.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 20 Mar 2023 19:33:56 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 20, 2023 at 9:34 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Mon, Mar 20, 2023 at 12:25 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Mar 17, 2023 at 4:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Fri, Mar 17, 2023 at 4:03 PM John Naylor\n> > > <john.naylor@enterprisedb.com> wrote:\n> > > >\n> > > > On Wed, Mar 15, 2023 at 9:32 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > > >\n> > > > > On Tue, Mar 14, 2023 at 8:27 PM John Naylor\n> > > > > <john.naylor@enterprisedb.com> wrote:\n> > > > > >\n> > > > > > I wrote:\n> > > > > >\n> > > > > > > > > Since the block-level measurement is likely overestimating quite a bit, I propose to simply reverse the order of the actions here, effectively reporting progress for the *last page* and not the current one: First update progress with the current memory usage, then add tids for this page. If this allocated a new block, only a small bit of that will be written to. If this block pushes it over the limit, we will detect that up at the top of the loop. It's kind of like our earlier attempts at a \"fudge factor\", but simpler and less brittle. And, as far as OS pages we have actually written to, I think it'll effectively respect the memory limit, at least in the local mem case. And the numbers will make sense.\n> > > >\n> > > > > > I still like my idea at the top of the page -- at least for vacuum and m_w_m. It's still not completely clear if it's right but I've got nothing better. 
It also ignores the work_mem issue, but I've given up anticipating all future cases at the moment.\n> > > >\n> > > > > IIUC you suggested measuring memory usage by tracking how much memory\n> > > > > chunks are allocated within a block. If your idea at the top of the\n> > > > > page follows this method, it still doesn't deal with the point Andres\n> > > > > mentioned.\n> > > >\n> > > > Right, but that idea was orthogonal to how we measure memory use, and in fact mentions blocks specifically. The re-ordering was just to make sure that progress reporting didn't show current-use > max-use.\n> > >\n> > > Right. I still like your re-ordering idea. It's true that the most\n> > > area of the last allocated block before heap scanning stops is not\n> > > actually used yet. I'm guessing we can just check if the context\n> > > memory has gone over the limit. But I'm concerned it might not work\n> > > well in systems where overcommit memory is disabled.\n> > >\n> > > >\n> > > > However, the big question remains DSA, since a new segment can be as large as the entire previous set of allocations. It seems it just wasn't designed for things where memory growth is unpredictable.\n> >\n> > aset.c also has a similar characteristic; allocates an 8K block upon\n> > the first allocation in a context, and doubles that size for each\n> > successive block request. But we can specify the initial block size\n> > and max blocksize. This made me think of another idea to specify both\n> > to DSA and both values are calculated based on m_w_m. For example, we\n>\n> That's an interesting idea, and the analogous behavior to aset could be a good thing for readability and maintainability. Worth seeing if it's workable.\n\nI've attached a quick hack patch. It can be applied on top of v32\npatches. The changes to dsa.c are straightforward since it makes the\ninitial and max block sizes configurable. 
The patch includes a test\nfunction, test_memory_usage() to simulate how DSA segments grow behind\nthe shared radix tree. If we set the first argument to true, it\ncalculates both initial and maximum block size based on work_mem (I\nused work_mem here just because its value range is larger than m_w_m):\n\npostgres(1:833654)=# select test_memory_usage(true);\nNOTICE: memory limit 134217728\nNOTICE: init 1048576 max 16777216\nNOTICE: initial: 1048576\nNOTICE: rt_create: 1048576\nNOTICE: allocate new DSM [1] 1048576\nNOTICE: allocate new DSM [2] 2097152\nNOTICE: allocate new DSM [3] 2097152\nNOTICE: allocate new DSM [4] 4194304\nNOTICE: allocate new DSM [5] 4194304\nNOTICE: allocate new DSM [6] 8388608\nNOTICE: allocate new DSM [7] 8388608\nNOTICE: allocate new DSM [8] 16777216\nNOTICE: allocate new DSM [9] 16777216\nNOTICE: allocate new DSM [10] 16777216\nNOTICE: allocate new DSM [11] 16777216\nNOTICE: allocate new DSM [12] 16777216\nNOTICE: allocate new DSM [13] 16777216\nNOTICE: allocate new DSM [14] 16777216\nNOTICE: reached: 148897792 (+14680064)\nNOTICE: 12718205 keys inserted: 148897792\n test_memory_usage\n-------------------\n\n(1 row)\n\nTime: 7195.664 ms (00:07.196)\n\nSetting the first argument to false, we can specify both manually in\nsecond and third arguments:\n\npostgres(1:833654)=# select test_memory_usage(false, 1024 * 1024, 1024\n* 1024 * 1024 * 10::bigint);\nNOTICE: memory limit 134217728\nNOTICE: init 1048576 max 10737418240\nNOTICE: initial: 1048576\nNOTICE: rt_create: 1048576\nNOTICE: allocate new DSM [1] 1048576\nNOTICE: allocate new DSM [2] 2097152\nNOTICE: allocate new DSM [3] 2097152\nNOTICE: allocate new DSM [4] 4194304\nNOTICE: allocate new DSM [5] 4194304\nNOTICE: allocate new DSM [6] 8388608\nNOTICE: allocate new DSM [7] 8388608\nNOTICE: allocate new DSM [8] 16777216\nNOTICE: allocate new DSM [9] 16777216\nNOTICE: allocate new DSM [10] 33554432\nNOTICE: allocate new DSM [11] 33554432\nNOTICE: allocate new DSM [12] 67108864\nNOTICE: 
reached: 199229440 (+65011712)\nNOTICE: 12718205 keys inserted: 199229440\n test_memory_usage\n-------------------\n\n(1 row)\n\nTime: 7187.571 ms (00:07.188)\n\nIt seems to work fine. The differences between the above two cases is\nthe maximum block size (16MB .vs 10GB). We allocated two more DSA\nsegments in the first segments but there was no big difference in the\nperformance in my test environment.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 20 Mar 2023 23:34:08 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 20, 2023 at 9:34 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Mon, Mar 20, 2023 at 9:34 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> > That's an interesting idea, and the analogous behavior to aset could be\na good thing for readability and maintainability. Worth seeing if it's\nworkable.\n>\n> I've attached a quick hack patch. It can be applied on top of v32\n> patches. The changes to dsa.c are straightforward since it makes the\n> initial and max block sizes configurable.\n\nGood to hear -- this should probably be proposed in a separate thread for\nwider visibility.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Mar 20, 2023 at 9:34 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> On Mon, Mar 20, 2023 at 9:34 PM John Naylor> <john.naylor@enterprisedb.com> wrote:> > That's an interesting idea, and the analogous behavior to aset could be a good thing for readability and maintainability. Worth seeing if it's workable.>> I've attached a quick hack patch. It can be applied on top of v32> patches. 
The changes to dsa.c are straightforward since it makes the> initial and max block sizes configurable.Good to hear -- this should probably be proposed in a separate thread for wider visibility.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 21 Mar 2023 12:41:26 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 21, 2023 at 2:41 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Mon, Mar 20, 2023 at 9:34 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Mar 20, 2023 at 9:34 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > > That's an interesting idea, and the analogous behavior to aset could be a good thing for readability and maintainability. Worth seeing if it's workable.\n> >\n> > I've attached a quick hack patch. It can be applied on top of v32\n> > patches. The changes to dsa.c are straightforward since it makes the\n> > initial and max block sizes configurable.\n>\n> Good to hear -- this should probably be proposed in a separate thread for wider visibility.\n\nAgreed. I'll start a new thread for that.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 21 Mar 2023 15:37:23 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Feb 16, 2023 at 11:44 PM Andres Freund <andres@anarazel.de> wrote:\n>\n> We really ought to replace the tid bitmap used for bitmap heap scans. The\n> hashtable we use is a pretty awful data structure for it. 
And that's not\n> filled in-order, for example.\n\nI spent some time studying tidbitmap.c, and not only does it make sense to\nuse a radix tree there, but since it has more complex behavior and stricter\nruntime requirements, it should really be the thing driving the design and\ntradeoffs, not vacuum:\n\n- With lazy expansion and single-value leaves, the root of a radix tree can\npoint to a single leaf. That might get rid of the need to track TBMStatus,\nsince setting a single-leaf tree should be cheap.\n\n- Fixed-size PagetableEntry's are pretty large, but the tid compression\nscheme used in this thread (in addition to being complex) is not a great\nfit for tidbitmap because it makes it more difficult to track per-block\nmetadata (see also next point). With the \"combined pointer-value slots\"\ntechnique, if a page's max tid offset is 63 or less, the offsets can be\nstored directly in the pointer for the exact case. The lowest bit can tag\nto indicate a pointer to a single-value leaf. That would complicate\noperations like union/intersection and tracking \"needs recheck\", but it\nwould reduce memory use and node-traversal in common cases.\n\n- Managing lossy storage. With pure blocknumber keys, replacing exact\nstorage for a range of 256 pages amounts to replacing a last-level node\nwith a single leaf containing one lossy PagetableEntry. The leader could\niterate over the nodes, and rank the last-level nodes by how much storage\nthey (possibly with leaf children) are using, and come up with an optimal\nlossy-conversion plan.\n\nThe above would address the points (not including better iteration and\nparallel bitmap index scans) raised in\n\nhttps://www.postgresql.org/message-id/CAPsAnrn5yWsoWs8GhqwbwAJx1SeLxLntV54Biq0Z-J_E86Fnng@mail.gmail.com\n\nIronically, by targeting a more difficult use case, it's easier since there\nis less freedom. There are many ways to beat a binary search, but fewer\ngood ways to improve bitmap heap scan. 
I'd like to put aside vacuum for\nsome time and try killing two birds with one stone, building upon our work\nthus far.\n\nNote: I've moved the CF entry to the next CF, and set to waiting on\nauthor for now. Since no action is currently required from Masahiko, I've\nadded myself as author as well. If tackling bitmap heap scan shows promise,\nwe could RWF and resurrect at a later time.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Thu, Feb 16, 2023 at 11:44 PM Andres Freund <andres@anarazel.de> wrote:>> We really ought to replace the tid bitmap used for bitmap heap scans. The> hashtable we use is a pretty awful data structure for it. And that's not> filled in-order, for example.I spent some time studying tidbitmap.c, and not only does it make sense to use a radix tree there, but since it has more complex behavior and stricter runtime requirements, it should really be the thing driving the design and tradeoffs, not vacuum:- With lazy expansion and single-value leaves, the root of a radix tree can point to a single leaf. That might get rid of the need to track TBMStatus, since setting a single-leaf tree should be cheap.- Fixed-size PagetableEntry's are pretty large, but the tid compression scheme used in this thread (in addition to being complex) is not a great fit for tidbitmap because it makes it more difficult to track per-block metadata (see also next point). With the \"combined pointer-value slots\" technique, if a page's max tid offset is 63 or less, the offsets can be stored directly in the pointer for the exact case. The lowest bit can tag to indicate a pointer to a single-value leaf. That would complicate operations like union/intersection and tracking \"needs recheck\", but it would reduce memory use and node-traversal in common cases.- Managing lossy storage. With pure blocknumber keys, replacing exact storage for a range of 256 pages amounts to replacing a last-level node with a single leaf containing one lossy PagetableEntry. 
The leader could iterate over the nodes, and rank the last-level nodes by how much storage they (possibly with leaf children) are using, and come up with an optimal lossy-conversion plan.The above would address the points (not including better iteration and parallel bitmap index scans) raised inhttps://www.postgresql.org/message-id/CAPsAnrn5yWsoWs8GhqwbwAJx1SeLxLntV54Biq0Z-J_E86Fnng@mail.gmail.comIronically, by targeting a more difficult use case, it's easier since there is less freedom. There are many ways to beat a binary search, but fewer good ways to improve bitmap heap scan. I'd like to put aside vacuum for some time and try killing two birds with one stone, building upon our work thus far. Note: I've moved the CF entry to the next CF, and set to waiting on author for now. Since no action is currently required from Masahiko, I've added myself as author as well. If tackling bitmap heap scan shows promise, we could RWF and resurrect at a later time.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Fri, 7 Apr 2023 16:55:41 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Apr 7, 2023 at 6:55 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Feb 16, 2023 at 11:44 PM Andres Freund <andres@anarazel.de> wrote:\n> >\n> > We really ought to replace the tid bitmap used for bitmap heap scans. The\n> > hashtable we use is a pretty awful data structure for it. And that's not\n> > filled in-order, for example.\n>\n> I spent some time studying tidbitmap.c, and not only does it make sense to use a radix tree there, but since it has more complex behavior and stricter runtime requirements, it should really be the thing driving the design and tradeoffs, not vacuum:\n>\n> - With lazy expansion and single-value leaves, the root of a radix tree can point to a single leaf. 
That might get rid of the need to track TBMStatus, since setting a single-leaf tree should be cheap.\n>\n\nInstead of introducing single-value leaves to the radix tree as\nanother structure, can we store pointers to PagetableEntry as values?\n\n> - Fixed-size PagetableEntry's are pretty large, but the tid compression scheme used in this thread (in addition to being complex) is not a great fit for tidbitmap because it makes it more difficult to track per-block metadata (see also next point). With the \"combined pointer-value slots\" technique, if a page's max tid offset is 63 or less, the offsets can be stored directly in the pointer for the exact case. The lowest bit can tag to indicate a pointer to a single-value leaf. That would complicate operations like union/intersection and tracking \"needs recheck\", but it would reduce memory use and node-traversal in common cases.\n>\n> - Managing lossy storage. With pure blocknumber keys, replacing exact storage for a range of 256 pages amounts to replacing a last-level node with a single leaf containing one lossy PagetableEntry. The leader could iterate over the nodes, and rank the last-level nodes by how much storage they (possibly with leaf children) are using, and come up with an optimal lossy-conversion plan.\n>\n> The above would address the points (not including better iteration and parallel bitmap index scans) raised in\n>\n> https://www.postgresql.org/message-id/CAPsAnrn5yWsoWs8GhqwbwAJx1SeLxLntV54Biq0Z-J_E86Fnng@mail.gmail.com\n>\n> Ironically, by targeting a more difficult use case, it's easier since there is less freedom. There are many ways to beat a binary search, but fewer good ways to improve bitmap heap scan. I'd like to put aside vacuum for some time and try killing two birds with one stone, building upon our work thus far.\n>\n> Note: I've moved the CF entry to the next CF, and set to waiting on author for now. Since no action is currently required from Masahiko, I've added myself as author as well. 
If tackling bitmap heap scan shows promise, we could RWF and resurrect at a later time.\n\nThanks. I'm going to continue researching the memory limitation and\ntry lazy path expansion until PG17 development begins.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 17 Apr 2023 22:48:27 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sat, Mar 11, 2023 at 12:26 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Mar 10, 2023 at 11:30 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Mar 10, 2023 at 3:42 PM John Naylor\n> > <john.naylor@enterprisedb.com> wrote:\n> > >\n> > > On Thu, Mar 9, 2023 at 1:51 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > > I've attached the new version patches. I merged improvements and fixes\n> > > > I did in the v29 patch.\n> > >\n> > > I haven't yet had a chance to look at those closely, since I've had to devote time to other commitments. I remember I wasn't particularly impressed that v29-0008 mixed my requested name-casing changes with a bunch of other random things. Separating those out would be an obvious way to make it easier for me to look at, whenever I can get back to this. I need to look at the iteration changes as well, in addition to testing memory measurement (thanks for the new results, they look encouraging).\n> >\n> > Okay, I'll separate them again.\n>\n> Attached new patch series. In addition to separate them again, I've\n> fixed a conflict with HEAD.\n>\n\nI've attached updated version patches to make cfbot happy. Also, I've\nsplitted fixup patches further(from 0007 except for 0016 and 0018) to\nmake reviews easy. These patches have the prefix radix tree, tidstore,\nand vacuum, indicating the part it changes. 
0016 patch is to change\nDSA so that we can specify both the initial and max segment size and\n0017 makes use of it in vacuumparallel.c I'm still researching a\nbetter solution for memory limitation but it's the best solution for\nme for now.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 18 Apr 2023 00:20:16 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Apr 17, 2023 at 8:49 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> > - With lazy expansion and single-value leaves, the root of a radix tree\ncan point to a single leaf. That might get rid of the need to track\nTBMStatus, since setting a single-leaf tree should be cheap.\n> >\n>\n> Instead of introducing single-value leaves to the radix tree as\n> another structure, can we store pointers to PagetableEntry as values?\n\nWell, that's pretty much what a single-value leaf is. Now that I've had\ntime to pause and regroup, I've looked into some aspects we previously put\noff for future work, and this is one of them.\n\nThe concept is really quite trivial, and it's the simplest and most\nflexible way to implement ART. Our, or at least my, documented reason not\nto go that route was due to \"an extra pointer traversal\", but that's\npartially mitigated by \"lazy expansion\", which is actually fairly easy to\ndo with single-value leaves. The two techniques complement each other in a\nnatural way. (Path compression, on the other hand, is much more complex.)\n\n> > Note: I've moved the CF entry to the next CF, and set to waiting on\nauthor for now. Since no action is currently required from Masahiko, I've\nadded myself as author as well. If tackling bitmap heap scan shows promise,\nwe could RWF and resurrect at a later time.\n>\n> Thanks. 
I'm going to continue researching the memory limitation and\n\nSounds like the best thing to nail down at this point.\n\n> try lazy path expansion until PG17 development begins.\n\nThis doesn't seem like a useful thing to try and attach into the current\npatch (if that's what you mean), as the current insert/delete paths are\nquite complex. Using bitmap heap scan as a motivating use case, I hope to\nrefocus complexity to where it's most needed, and aggressively simplify\nwhere possible.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Apr 17, 2023 at 8:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:> > - With lazy expansion and single-value leaves, the root of a radix tree can point to a single leaf. That might get rid of the need to track TBMStatus, since setting a single-leaf tree should be cheap.> >>> Instead of introducing single-value leaves to the radix tree as> another structure, can we store pointers to PagetableEntry as values?Well, that's pretty much what a single-value leaf is. Now that I've had time to pause and regroup, I've looked into some aspects we previously put off for future work, and this is one of them.The concept is really quite trivial, and it's the simplest and most flexible way to implement ART. Our, or at least my, documented reason not to go that route was due to \"an extra pointer traversal\", but that's partially mitigated by \"lazy expansion\", which is actually fairly easy to do with single-value leaves. The two techniques complement each other in a natural way. (Path compression, on the other hand, is much more complex.)> > Note: I've moved the CF entry to the next CF, and set to waiting on author for now. Since no action is currently required from Masahiko, I've added myself as author as well. If tackling bitmap heap scan shows promise, we could RWF and resurrect at a later time.>> Thanks. 
I'm going to continue researching the memory limitation andSounds like the best thing to nail down at this point.> try lazy path expansion until PG17 development begins.This doesn't seem like a useful thing to try and attach into the current patch (if that's what you mean), as the current insert/delete paths are quite complex. Using bitmap heap scan as a motivating use case, I hope to refocus complexity to where it's most needed, and aggressively simplify where possible.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 19 Apr 2023 14:02:14 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Apr 19, 2023 at 4:02 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Apr 17, 2023 at 8:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > - With lazy expansion and single-value leaves, the root of a radix tree can point to a single leaf. That might get rid of the need to track TBMStatus, since setting a single-leaf tree should be cheap.\n> > >\n> >\n> > Instead of introducing single-value leaves to the radix tree as\n> > another structure, can we store pointers to PagetableEntry as values?\n>\n> Well, that's pretty much what a single-value leaf is. Now that I've had time to pause and regroup, I've looked into some aspects we previously put off for future work, and this is one of them.\n>\n> The concept is really quite trivial, and it's the simplest and most flexible way to implement ART. Our, or at least my, documented reason not to go that route was due to \"an extra pointer traversal\", but that's partially mitigated by \"lazy expansion\", which is actually fairly easy to do with single-value leaves. The two techniques complement each other in a natural way. 
(Path compression, on the other hand, is much more complex.)\n>\n> > > Note: I've moved the CF entry to the next CF, and set to waiting on author for now. Since no action is currently required from Masahiko, I've added myself as author as well. If tackling bitmap heap scan shows promise, we could RWF and resurrect at a later time.\n> >\n> > Thanks. I'm going to continue researching the memory limitation and\n>\n> Sounds like the best thing to nail down at this point.\n>\n> > try lazy path expansion until PG17 development begins.\n>\n> This doesn't seem like a useful thing to try and attach into the current patch (if that's what you mean), as the current insert/delete paths are quite complex. Using bitmap heap scan as a motivating use case, I hope to refocus complexity to where it's most needed, and aggressively simplify where possible.\n>\n\nI agree that we don't want to make the current patch complex further.\n\nThinking about the memory limitation more, I think that combination of\nthe idea of specifying the initial and max DSA segment size and\ndsa_set_size_limit() works well. There are two points in terms of\nmemory limitation; when the memory usage reaches the limit we want (1)\nto minimize the last allocated memory block that is allocated but not\nused yet and (2) to minimize the amount of memory that exceeds the\nmemory limit. Since we can specify the maximum DSA segment size, the\nlast allocated block before reaching the memory limit is small. 
Also,\nthanks to dsa_set_size_limit(), the total DSA size will stop at the\nlimit, so (memory_usage >= memory_limit) returns true without any\nexceeding memory.\n\nGiven that we need to configure the initial and maximum DSA segment\nsize and set the DSA limit for TidStore memory accounting and\nlimiting, it would be better to create the DSA for TidStore by\nTidStoreCreate() API, rather than creating DSA in the caller and pass\nit to TidStoreCreate().\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 24 Apr 2023 14:45:04 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Apr 7, 2023 at 4:55 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n\n> - Fixed-size PagetableEntry's are pretty large, but the tid compression\nscheme used in this thread (in addition to being complex) is not a great\nfit for tidbitmap because it makes it more difficult to track per-block\nmetadata (see also next point). With the \"combined pointer-value slots\"\ntechnique, if a page's max tid offset is 63 or less, the offsets can be\nstored directly in the pointer for the exact case. The lowest bit can tag\nto indicate a pointer to a single-value leaf. That would complicate\noperations like union/intersection and tracking \"needs recheck\", but it\nwould reduce memory use and node-traversal in common cases.\n\n[just getting some thoughts out there before I have something concrete]\n\nThinking some more, this needn't be complicated at all. We'd just need to\nreserve some bits of a bitmapword for the tag, as well as flags for\n\"ischunk\" and \"recheck\". The other bits can be used for offsets.\nGetting/storing the offsets basically amounts to adjusting the shift by a\nconstant. 
That way, this \"embeddable PTE\" could serve as both \"PTE embedded\nin a node pointer\" and also the first member of a full PTE. A full PTE is\nnow just an array of embedded PTEs, except only the first one has the flags\nwe need. That reduces the number of places that have to be different.\nStoring any set of offsets all less than ~60 would save\nallocation/traversal in a large number of real cases. Furthermore, that\nwould reduce a full PTE to 40 bytes because there would be no padding.\n\nThis all assumes the key (block number) is no longer stored in the PTE,\nwhether embedded or not. That would mean this technique:\n\n> - With lazy expansion and single-value leaves, the root of a radix tree\ncan point to a single leaf. That might get rid of the need to track\nTBMStatus, since setting a single-leaf tree should be cheap.\n\n...is not a good trade off because it requires each leaf to have the key,\nand would thus reduce the utility of embedded leaves. We just need to make\nsure storing a single value is not costly, and I suspect it's not.\n(Currently the overhead avoided is allocating and zeroing a few kilobytes\nfor a hash table). If it is not, then we don't need a special case in\ntidbitmap, which would be a great simplification. If it is, there are other\nways to mitigate.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Fri, Apr 7, 2023 at 4:55 PM John Naylor <john.naylor@enterprisedb.com> wrote:> - Fixed-size PagetableEntry's are pretty large, but the tid compression scheme used in this thread (in addition to being complex) is not a great fit for tidbitmap because it makes it more difficult to track per-block metadata (see also next point). With the \"combined pointer-value slots\" technique, if a page's max tid offset is 63 or less, the offsets can be stored directly in the pointer for the exact case. The lowest bit can tag to indicate a pointer to a single-value leaf. 
That would complicate operations like union/intersection and tracking \"needs recheck\", but it would reduce memory use and node-traversal in common cases.[just getting some thoughts out there before I have something concrete]Thinking some more, this needn't be complicated at all. We'd just need to reserve some bits of a bitmapword for the tag, as well as flags for \"ischunk\" and \"recheck\". The other bits can be used for offsets. Getting/storing the offsets basically amounts to adjusting the shift by a constant. That way, this \"embeddable PTE\" could serve as both \"PTE embedded in a node pointer\" and also the first member of a full PTE. A full PTE is now just an array of embedded PTEs, except only the first one has the flags we need. That reduces the number of places that have to be different. Storing any set of offsets all less than ~60 would save allocation/traversal in a large number of real cases. Furthermore, that would reduce a full PTE to 40 bytes because there would be no padding.This all assumes the key (block number) is no longer stored in the PTE, whether embedded or not. That would mean this technique:> - With lazy expansion and single-value leaves, the root of a radix tree can point to a single leaf. That might get rid of the need to track TBMStatus, since setting a single-leaf tree should be cheap....is not a good trade off because it requires each leaf to have the key, and would thus reduce the utility of embedded leaves. We just need to make sure storing a single value is not costly, and I suspect it's not. (Currently the overhead avoided is allocating and zeroing a few kilobytes for a hash table). If it is not, then we don't need a special case in tidbitmap, which would be a great simplification. 
If it is, there are other ways to mitigate.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 8 May 2023 17:23:23 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I wrote:\n> the current insert/delete paths are quite complex. Using bitmap heap scan\nas a motivating use case, I hope to refocus complexity to where it's most\nneeded, and aggressively simplify where possible.\n\nSometime in the not-too-distant future, I will start a new thread focusing\non bitmap heap scan, but for now, I just want to share some progress on\nmaking the radix tree usable not only for that, but hopefully a wider range\nof applications, while making the code simpler and the binary smaller. The\nattached patches are incomplete (e.g. no iteration) and quite a bit messy,\nso tar'd and gzip'd for the curious (should apply on top of v32 0001-03 +\n0007-09 ).\n\n0001\n\nThis combines a few concepts that I didn't bother separating out after the\nfact:\n- Split insert_impl.h into multiple functions for improved readability and\nmaintainability.\n- Use single-value leaves as the basis for storing values, with the goal to\nget to \"combined pointer-value slots\" for efficiency and flexibility.\n- With the latter in mind, searching the child within a node now returns\nthe address of the slot. This allows the same interface whether the slot\ncontains a child pointer or a value.\n- Starting with RT_SET, start turning some iterative algorithms into\nrecursive ones. This is a more natural way to traverse a tree structure,\nand we already see an advantage: Previously when growing a node, we\nsearched within the parent to update its reference to the new node, because\nwe didn't know the slot we descended from. 
Now we can simply update a\nsingle variable.\n- Since we recursively pass the \"shift\" down the stack, it doesn't have to\nbe stored in any node -- only the \"top-level\" start shift is stored in the\ntree control struct. This was easy to code since the node's shift value was\nhardly ever accessed anyway! The node header shrinks from 5 bytes to 4.\n\n0002\n\nBack in v15, we tried keeping DSA/local pointers as members of a struct. I\ndid not like the result, but still thought it was a good idea. RT_DELETE is\na complex function and I didn't want to try rewriting it without a pointer\nabstraction, so I've resurrected this idea, but in a simpler, less\nintrusive way. A key difference from v15 is using a union type for the\nnon-shmem case.\n\n0004\n\nRewrite RT_DELETE using recursion. I find this simpler than the previous\nopen-coded stack.\n\n0005-06\n\nDeletion has an inefficiency: One function searches for the child to see if\nit's there, then another function searches for it again to delete it. Since\n0001, a successful child search returns the address of the slot, so we can\nsave it. For the two smaller \"linear search\" node kinds we can then use a\nsingle subtraction to compute the chunk/slot index for deletion. Also,\nsplit RT_NODE_DELETE_INNER into separate functions, for a similar reason as\nthe insert case in 0001.\n\n0007\n\nAnticipate node shrinking: If only one node-kind needs to be freed, we can\nmove a branch to that one code path, rather than every place where RT_FREE\nis inlined.\n\n0009\n\nTeach node256 how to shrink *. Since we know the number of children in a\nnode256 can't possibly be zero, we can use uint8 to store the count and\ninterpret an overflow to zero as 256 for this node. The node header shrinks\nfrom 4 bytes to 3.\n\n* Other nodes will follow in due time, but only after I figure out how to\ndo it nicely (ideas welcome!) 
-- currently node32's two size classes work\nfine for growing, but the code should be simplified before extending to\nother cases.)\n\n0010\n\nLimited support for \"combined pointer-value slots\". At compile-time, choose\neither that or \"single-value leaves\" based on the size of the value type\ntemplate parameter. Values that are pointer-sized or less can fit in the\nlast-level child slots of nominal \"inner nodes\" without duplicated\nleaf-node code. Node256 now must act like the previous 'node256 leaf',\nsince zero is a valid value. Aside from that, this was a small change.\n\nWhat I've shared here could work (in principal, since it uses uint64\nvalues) for tidstore, possibly faster (untested) because of better code\ndensity, but as mentioned I want to shoot for higher. For tidbitmap.c, I\nwant to extend this idea and branch at run-time on a per-value basis, so\nthat a page-table entry that fits in a pointer can go there, and if not,\nit'll be a full leaf. (This technique enables more flexibility in\nlossifying pages as well.) Run-time info will require e.g. an additional\nbit per slot. Since the node header is now 3 bytes, we can spare one more\nbyte in the node3 case. In addition, we can and should also bump it back up\nto node4, still keeping the metadata within 8 bytes (no struct padding).\n\nI've started in this patchset to refer to the node kinds as \"4/16/48/256\",\nregardless of their actual fanout. This is for readability (by matching the\nlanguage in the paper) and maintainability (should *not* ever change\nagain). The size classes (including multiple classes per kind) could be\ndetermined by macros and #ifdef's. 
For example, in non-SIMD architectures,\nit's likely slow to search an array of 32 key chunks, so in that case the\ncompiler should choose size classes similar to these four nominal kinds.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 24 May 2023 06:16:54 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn Tue, May 23, 2023 at 7:17 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> I wrote:\n> > the current insert/delete paths are quite complex. Using bitmap heap scan as a motivating use case, I hope to refocus complexity to where it's most needed, and aggressively simplify where possible.\n>\n> Sometime in the not-too-distant future, I will start a new thread focusing on bitmap heap scan, but for now, I just want to share some progress on making the radix tree usable not only for that, but hopefully a wider range of applications, while making the code simpler and the binary smaller. The attached patches are incomplete (e.g. no iteration) and quite a bit messy, so tar'd and gzip'd for the curious (should apply on top of v32 0001-03 + 0007-09 ).\n>\n\nThank you for making progress on this. I agree with these directions\noverall. I have some comments and questions:\n\n> - With the latter in mind, searching the child within a node now returns the address of the slot. This allows the same interface whether the slot contains a child pointer or a value.\n\nProbably we can apply similar changes to the iteration as well.\n\n> * Other nodes will follow in due time, but only after I figure out how to do it nicely (ideas welcome!) -- currently node32's two size classes work fine for growing, but the code should be simplified before extending to other cases.)\n\nWithin the size class, we just alloc a new node of lower size class\nand do memcpy(). I guess it will be almost same as what we do for\ngrowing. 
It might be a good idea to support node shrinking within the\nsize class for node32 (and node125 if we support). I don't think\nshrinking class-3 to class-1 makes sense.\n\n>\n> Limited support for \"combined pointer-value slots\". At compile-time, choose either that or \"single-value leaves\" based on the size of the value type template parameter. Values that are pointer-sized or less can fit in the last-level child slots of nominal \"inner nodes\" without duplicated leaf-node code. Node256 now must act like the previous 'node256 leaf', since zero is a valid value. Aside from that, this was a small change.\n\nYes, but it also means that we use pointer-sized value anyway even if\nthe value size is less than that, which wastes the memory, no?\n\n>\n> What I've shared here could work (in principal, since it uses uint64 values) for tidstore, possibly faster (untested) because of better code density, but as mentioned I want to shoot for higher. For tidbitmap.c, I want to extend this idea and branch at run-time on a per-value basis, so that a page-table entry that fits in a pointer can go there, and if not, it'll be a full leaf. (This technique enables more flexibility in lossifying pages as well.) Run-time info will require e.g. an additional bit per slot. Since the node header is now 3 bytes, we can spare one more byte in the node3 case. In addition, we can and should also bump it back up to node4, still keeping the metadata within 8 bytes (no struct padding).\n\nSounds good.\n\n> I've started in this patchset to refer to the node kinds as \"4/16/48/256\", regardless of their actual fanout. This is for readability (by matching the language in the paper) and maintainability (should *not* ever change again). The size classes (including multiple classes per kind) could be determined by macros and #ifdef's. 
For example, in non-SIMD architectures, it's likely slow to search an array of 32 key chunks, so in that case the compiler should choose size classes similar to these four nominal kinds.\n\nIf we want to use the node kinds used in the paper, I think we should\nchange the number in RT_NODE_KIND_X too. Otherwise, it would be\nconfusing when reading the code without referring to the paper.\nParticularly, this part is very confusing:\n\n case RT_NODE_KIND_3:\n RT_ADD_CHILD_4(tree, ref, node, chunk, child);\n break;\n case RT_NODE_KIND_32:\n RT_ADD_CHILD_16(tree, ref, node, chunk, child);\n break;\n case RT_NODE_KIND_125:\n RT_ADD_CHILD_48(tree, ref, node, chunk, child);\n break;\n case RT_NODE_KIND_256:\n RT_ADD_CHILD_256(tree, ref, node, chunk, child);\n break;\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 5 Jun 2023 06:31:41 -0400", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jun 5, 2023 at 5:32 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> > Sometime in the not-too-distant future, I will start a new thread\nfocusing on bitmap heap scan, but for now, I just want to share some\nprogress on making the radix tree usable not only for that, but hopefully a\nwider range of applications, while making the code simpler and the binary\nsmaller. The attached patches are incomplete (e.g. no iteration) and quite\na bit messy, so tar'd and gzip'd for the curious (should apply on top of\nv32 0001-03 + 0007-09 ).\n> >\n>\n> Thank you for making progress on this. I agree with these directions\n> overall. I have some comments and questions:\n\nGlad to hear it and thanks for looking!\n\n> > * Other nodes will follow in due time, but only after I figure out how\nto do it nicely (ideas welcome!) 
-- currently node32's two size classes\nwork fine for growing, but the code should be simplified before extending\nto other cases.)\n>\n> Within the size class, we just alloc a new node of lower size class\n> and do memcpy(). I guess it will be almost same as what we do for\n> growing.\n\nOh, the memcpy part is great, very simple. I mean the (compile-time) \"class\ninfo\" table lookups are a bit awkward. I'm thinking the hard-coded numbers\nlike this:\n\n.fanout = 3,\n.inner_size = sizeof(RT_NODE_INNER_3) + 3 * sizeof(RT_PTR_ALLOC),\n\n...may be better with a #defined symbol that can also be used elsewhere.\n\n> I don't think\n> shrinking class-3 to class-1 makes sense.\n\nAgreed. The smallest kind should just be freed when empty.\n\n> > Limited support for \"combined pointer-value slots\". At compile-time,\nchoose either that or \"single-value leaves\" based on the size of the value\ntype template parameter. Values that are pointer-sized or less can fit in\nthe last-level child slots of nominal \"inner nodes\" without duplicated\nleaf-node code. Node256 now must act like the previous 'node256 leaf',\nsince zero is a valid value. 
Aside from that, this was a small change.\n>\n> Yes, but it also means that we use pointer-sized value anyway even if\n> the value size is less than that, which wastes the memory, no?\n\nAt a low-level, that makes sense, but I've found an interesting global\neffect showing the opposite: _less_ memory, which may compensate:\n\npsql -c \"select * from bench_search_random_nodes(1*1000*1000)\"\nnum_keys = 992660\n\n(using a low enough number that the experimental change n125->n63 doesn't\naffect anything)\nheight = 4, n3 = 375258, n15 = 137490, n32 = 0, n63 = 0, n256 = 1025\n\nv31:\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 47800768 | 253 | 134\n\n(unreleased code \"similar\" to v33, but among other things restores the\nseparate \"extend down\" function)\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 42926048 | 221 | 127\n\nI'd need to make sure, but apparently just going from 6 non-empty memory\ncontexts to 3 (remember all values are embedded here) reduces memory\nfragmentation significantly in this test. (That should also serve as a\ndemonstration that additional size classes have both runtime costs as well\nas benefits. We need to have a balance.)\n\nSo, I'm inclined to think the only reason to prefer \"multi-value leaves\" is\nif 1) the value type is _bigger_ than a pointer 2) there is no convenient\nabbreviation (like tid bitmaps have) and 3) the use case really needs to\navoid another memory access. Under those circumstances, though, the new\ncode plus lazy expansion etc might suit and be easier to maintain. That\nsaid, I've mostly left alone the \"leaf\" types and functions, as well as\nadded some detritus like \"const bool = false;\". 
It would look a *lot* nicer\nif we gave up on multi-value leaves entirely, but there's no rush and I\ndon't want to close that door entirely just yet.\n\n> > What I've shared here could work (in principal, since it uses uint64\nvalues) for tidstore, possibly faster (untested) because of better code\ndensity, but as mentioned I want to shoot for higher. For tidbitmap.c, I\nwant to extend this idea and branch at run-time on a per-value basis, so\nthat a page-table entry that fits in a pointer can go there, and if not,\nit'll be a full leaf. (This technique enables more flexibility in\nlossifying pages as well.) Run-time info will require e.g. an additional\nbit per slot. Since the node header is now 3 bytes, we can spare one more\nbyte in the node3 case. In addition, we can and should also bump it back up\nto node4, still keeping the metadata within 8 bytes (no struct padding).\n>\n> Sounds good.\n\nThe additional bit per slot would require per-node logic and additional\nbranches, which is not great. I'm now thinking a much easier way to get\nthere is to give up (at least for now) on promising that \"run-time\nembeddable values\" can use the full pointer-size (unlike value types found\nembeddable at compile-time). Reserving the lowest pointer bit for a tag\n\"value or pointer-to-leaf\" would have a much smaller code footprint. That\nalso has a curious side-effect for TID offsets: They are one-based so\nreserving the zero bit would actually simplify things: getting rid of the\n+1/-1 logic when converting bits to/from offsets.\n\nIn addition, without a new bitmap, the smallest node can actually be up to\na node5 with no struct padding, with a node2 as a subclass. (Those numbers\ncoincidentally were also one scenario in the paper, when calculating\nworst-case memory usage). 
That's worth considering.\n\n> > I've started in this patchset to refer to the node kinds as\n\"4/16/48/256\", regardless of their actual fanout.\n\n> If we want to use the node kinds used in the paper, I think we should\n> change the number in RT_NODE_KIND_X too.\n\nOh absolutely, this is nowhere near ready for cosmetic review :-)\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Jun 5, 2023 at 5:32 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> > Sometime in the not-too-distant future, I will start a new thread focusing on bitmap heap scan, but for now, I just want to share some progress on making the radix tree usable not only for that, but hopefully a wider range of applications, while making the code simpler and the binary smaller. The attached patches are incomplete (e.g. no iteration) and quite a bit messy, so tar'd and gzip'd for the curious (should apply on top of v32 0001-03 + 0007-09 ).> >>> Thank you for making progress on this. I agree with these directions> overall. I have some comments and questions:Glad to hear it and thanks for looking!> > * Other nodes will follow in due time, but only after I figure out how to do it nicely (ideas welcome!) -- currently node32's two size classes work fine for growing, but the code should be simplified before extending to other cases.)>> Within the size class, we just alloc a new node of lower size class> and do memcpy(). I guess it will be almost same as what we do for> growing.Oh, the memcpy part is great, very simple. I mean the (compile-time) \"class info\" table lookups are a bit awkward. I'm thinking the hard-coded numbers like this:.fanout = 3,.inner_size = sizeof(RT_NODE_INNER_3) + 3 * sizeof(RT_PTR_ALLOC),...may be better with a #defined symbol that can also be used elsewhere.> I don't think> shrinking class-3 to class-1 makes sense.Agreed. The smallest kind should just be freed when empty.> > Limited support for \"combined pointer-value slots\". 
At compile-time, choose either that or \"single-value leaves\" based on the size of the value type template parameter. Values that are pointer-sized or less can fit in the last-level child slots of nominal \"inner nodes\" without duplicated leaf-node code. Node256 now must act like the previous 'node256 leaf', since zero is a valid value. Aside from that, this was a small change.>> Yes, but it also means that we use pointer-sized value anyway even if> the value size is less than that, which wastes the memory, no?At a low-level, that makes sense, but I've found an interesting global effect showing the opposite: _less_ memory, which may compensate:psql -c \"select * from bench_search_random_nodes(1*1000*1000)\"num_keys = 992660(using a low enough number that the experimental change n125->n63 doesn't affect anything)height = 4, n3 = 375258, n15 = 137490, n32 = 0, n63 = 0, n256 = 1025v31: mem_allocated | load_ms | search_ms ---------------+---------+-----------      47800768 |     253 |       134(unreleased code \"similar\" to v33, but among other things restores the separate \"extend down\" function) mem_allocated | load_ms | search_ms ---------------+---------+-----------      42926048 |     221 |       127I'd need to make sure, but apparently just going from 6 non-empty memory contexts to 3 (remember all values are embedded here) reduces memory fragmentation significantly in this test. (That should also serve as a demonstration that additional size classes have both runtime costs as well as benefits. We need to have a balance.)So, I'm inclined to think the only reason to prefer \"multi-value leaves\" is if 1) the value type is _bigger_ than a pointer 2) there is no convenient abbreviation (like tid bitmaps have) and 3) the use case really needs to avoid another memory access. Under those circumstances, though, the new code plus lazy expansion etc might suit and be easier to maintain. 
That said, I've mostly left alone the \"leaf\" types and functions, as well as added some detritus like \"const bool = false;\". It would look a *lot* nicer if we gave up on multi-value leaves entirely, but there's no rush and I don't want to close that door entirely just yet.> > What I've shared here could work (in principal, since it uses uint64 values) for tidstore, possibly faster (untested) because of better code density, but as mentioned I want to shoot for higher. For tidbitmap.c, I want to extend this idea and branch at run-time on a per-value basis, so that a page-table entry that fits in a pointer can go there, and if not, it'll be a full leaf. (This technique enables more flexibility in lossifying pages as well.) Run-time info will require e.g. an additional bit per slot. Since the node header is now 3 bytes, we can spare one more byte in the node3 case. In addition, we can and should also bump it back up to node4, still keeping the metadata within 8 bytes (no struct padding).>> Sounds good.The additional bit per slot would require per-node logic and additional branches, which is not great. I'm now thinking a much easier way to get there is to give up (at least for now) on promising that \"run-time embeddable values\" can use the full pointer-size (unlike value types found embeddable at compile-time). Reserving the lowest pointer bit for a tag \"value or pointer-to-leaf\" would have a much smaller code footprint. That also has a curious side-effect for TID offsets: They are one-based so reserving the zero bit would actually simplify things: getting rid of the +1/-1 logic when converting bits to/from offsets.In addition, without a new bitmap, the smallest node can actually be up to a node5 with no struct padding, with a node2 as a subclass. (Those numbers coincidentally were also one scenario in the paper, when calculating worst-case memory usage). 
That's worth considering.> > I've started in this patchset to refer to the node kinds as \"4/16/48/256\", regardless of their actual fanout.> If we want to use the node kinds used in the paper, I think we should> change the number in RT_NODE_KIND_X too. Oh absolutely, this is nowhere near ready for cosmetic review :-) --John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Tue, 6 Jun 2023 12:13:36 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jun 6, 2023 at 2:13 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Mon, Jun 5, 2023 at 5:32 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > Sometime in the not-too-distant future, I will start a new thread focusing on bitmap heap scan, but for now, I just want to share some progress on making the radix tree usable not only for that, but hopefully a wider range of applications, while making the code simpler and the binary smaller. The attached patches are incomplete (e.g. no iteration) and quite a bit messy, so tar'd and gzip'd for the curious (should apply on top of v32 0001-03 + 0007-09 ).\n> > >\n> >\n> > Thank you for making progress on this. I agree with these directions\n> > overall. I have some comments and questions:\n>\n> Glad to hear it and thanks for looking!\n>\n> > > * Other nodes will follow in due time, but only after I figure out how to do it nicely (ideas welcome!) -- currently node32's two size classes work fine for growing, but the code should be simplified before extending to other cases.)\n> >\n> > Within the size class, we just alloc a new node of lower size class\n> > and do memcpy(). I guess it will be almost same as what we do for\n> > growing.\n>\n> Oh, the memcpy part is great, very simple. I mean the (compile-time) \"class info\" table lookups are a bit awkward. 
I'm thinking the hard-coded numbers like this:\n>\n> .fanout = 3,\n> .inner_size = sizeof(RT_NODE_INNER_3) + 3 * sizeof(RT_PTR_ALLOC),\n>\n> ...may be better with a #defined symbol that can also be used elsewhere.\n\nFWIW, exposing these definitions would be good in terms of testing too\nsince we can use them in regression tests.\n\n>\n> > I don't think\n> > shrinking class-3 to class-1 makes sense.\n>\n> Agreed. The smallest kind should just be freed when empty.\n>\n> > > Limited support for \"combined pointer-value slots\". At compile-time, choose either that or \"single-value leaves\" based on the size of the value type template parameter. Values that are pointer-sized or less can fit in the last-level child slots of nominal \"inner nodes\" without duplicated leaf-node code. Node256 now must act like the previous 'node256 leaf', since zero is a valid value. Aside from that, this was a small change.\n> >\n> > Yes, but it also means that we use pointer-sized value anyway even if\n> > the value size is less than that, which wastes the memory, no?\n>\n> At a low-level, that makes sense, but I've found an interesting global effect showing the opposite: _less_ memory, which may compensate:\n>\n> psql -c \"select * from bench_search_random_nodes(1*1000*1000)\"\n> num_keys = 992660\n>\n> (using a low enough number that the experimental change n125->n63 doesn't affect anything)\n> height = 4, n3 = 375258, n15 = 137490, n32 = 0, n63 = 0, n256 = 1025\n>\n> v31:\n> mem_allocated | load_ms | search_ms\n> ---------------+---------+-----------\n> 47800768 | 253 | 134\n>\n> (unreleased code \"similar\" to v33, but among other things restores the separate \"extend down\" function)\n> mem_allocated | load_ms | search_ms\n> ---------------+---------+-----------\n> 42926048 | 221 | 127\n>\n> I'd need to make sure, but apparently just going from 6 non-empty memory contexts to 3 (remember all values are embedded here) reduces memory fragmentation significantly in this test. 
(That should also serve as a demonstration that additional size classes have both runtime costs as well as benefits. We need to have a balance.)\n\nInteresting. The result would probably vary if we change the slab\nblock sizes. I'd like to experiment if the code is available.\n\n>\n> So, I'm inclined to think the only reason to prefer \"multi-value leaves\" is if 1) the value type is _bigger_ than a pointer 2) there is no convenient abbreviation (like tid bitmaps have) and 3) the use case really needs to avoid another memory access. Under those circumstances, though, the new code plus lazy expansion etc might suit and be easier to maintain.\n\nIndeed.\n\n>\n> > > What I've shared here could work (in principal, since it uses uint64 values) for tidstore, possibly faster (untested) because of better code density, but as mentioned I want to shoot for higher. For tidbitmap.c, I want to extend this idea and branch at run-time on a per-value basis, so that a page-table entry that fits in a pointer can go there, and if not, it'll be a full leaf. (This technique enables more flexibility in lossifying pages as well.) Run-time info will require e.g. an additional bit per slot. Since the node header is now 3 bytes, we can spare one more byte in the node3 case. In addition, we can and should also bump it back up to node4, still keeping the metadata within 8 bytes (no struct padding).\n> >\n> > Sounds good.\n>\n> The additional bit per slot would require per-node logic and additional branches, which is not great. I'm now thinking a much easier way to get there is to give up (at least for now) on promising that \"run-time embeddable values\" can use the full pointer-size (unlike value types found embeddable at compile-time). Reserving the lowest pointer bit for a tag \"value or pointer-to-leaf\" would have a much smaller code footprint.\n\nDo you mean we can make sure that the value doesn't set the lowest\nbit? 
Or is it an optimization for TIDStore?\n\n> In addition, without a new bitmap, the smallest node can actually be up to a node5 with no struct padding, with a node2 as a subclass. (Those numbers coincidentally were also one scenario in the paper, when calculating worst-case memory usage). That's worth considering.\n\nAgreed.\n\nFWIW please let me know if there are some experiments I can help with.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 13 Jun 2023 14:46:57 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jun 13, 2023 at 12:47 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Tue, Jun 6, 2023 at 2:13 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> >\n\n> > I'd need to make sure, but apparently just going from 6 non-empty\nmemory contexts to 3 (remember all values are embedded here) reduces memory\nfragmentation significantly in this test. (That should also serve as a\ndemonstration that additional size classes have both runtime costs as well\nas benefits. We need to have a balance.)\n>\n> Interesting. The result would probably vary if we change the slab\n> block sizes. I'd like to experiment if the code is available.\n\nI cleaned up a few things and attached v34 so you can do that if you like.\n(Note: what I said about node63/n125 not making a difference in that one\ntest is not quite true since slab keeps a few empty blocks around. I did\nsome rough mental math and I think it doesn't change the conclusion any.)\n\n0001-0007 is basically v33, but can apply on master.\n\n0008 just adds back RT_EXTEND_DOWN. I left it out to simplify moving to\nrecursion.\n\n> > Oh, the memcpy part is great, very simple. I mean the (compile-time)\n\"class info\" table lookups are a bit awkward. 
I'm thinking the hard-coded\nnumbers like this:\n> >\n> > .fanout = 3,\n> > .inner_size = sizeof(RT_NODE_INNER_3) + 3 * sizeof(RT_PTR_ALLOC),\n> >\n> > ...may be better with a #defined symbol that can also be used elsewhere.\n>\n> FWIW, exposing these definitions would be good in terms of testing too\n> since we can use them in regression tests.\n\nI added some definitions in 0012. It kind of doesn't matter now what sizes\nare the test unless it also can test that it stays within the expected\nsize, if that makes sense. It is helpful during debugging to force growth\nto stop at a certain size.\n\n> > > Within the size class, we just alloc a new node of lower size class\n> > > and do memcpy().\n\nNot anymore. ;-) To be technical, it didn't \"just\" memcpy(), since it then\nfell through to find the insert position and memmove(). In some parts of\nAndres' prototype, no memmove() is necessary, because it memcpy()'s around\nthe insert position, and puts the new child in the right place. I've done\nthis in 0009.\n\nThe memcpy you mention was done for 1) simplicity 2) to avoid memset'ing.\nWell, it was never necessary to memset the whole node in the first place.\nOnly the header, slot index array, and isset arrays need to be zeroed, so\nin 0011 we always do only that. That combines alloc and init functionality,\nand it's simple everywhere.\n\nIn 0010 I restored iteration functionality -- it can no longer get the\nshift from the node, because it's not there as of v33. I was not\nparticularly impressed that there were no basic iteration tests, and in\nfact the test_pattern test relied on functioning iteration. I added some\nbasic tests. I'm not entirely pleased with testing overall, but I think\nit's at least sufficient for the job. I had the idea to replace \"shift\"\neverywhere and use \"level\" as a fundamental concept. This is clearer. I do\nwant to make sure the compiler can compute the shift efficiently where\nnecessary. 
I think that can wait until much later.\n\n0013 standardizes (mostly) on 4/16/48/256 for naming convention, regardless\nof actual size, as I started to do earlier.\n\n0014 is part cleanup of shrinking, and part making grow-node-48 more\nconsistent with the rest.\n\n> > The additional bit per slot would require per-node logic and additional\nbranches, which is not great. I'm now thinking a much easier way to get\nthere is to give up (at least for now) on promising that \"run-time\nembeddable values\" can use the full pointer-size (unlike value types found\nembeddable at compile-time). Reserving the lowest pointer bit for a tag\n\"value or pointer-to-leaf\" would have a much smaller code footprint.\n>\n> Do you mean we can make sure that the value doesn't set the lowest\n> bit? Or is it an optimization for TIDStore?\n\nIt will be up to the caller (the user of the template) -- if an\nabbreviation is possible that fits in the upper 63 bits (with something to\nguard for 32-bit platforms), the developer will be able to specify a\nconversion function so that the caller only sees the full value when\nsearching and setting. Without such a function, the template will fall back\nto the size of the value type to determine how the value is stored.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 14 Jun 2023 13:23:07 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I wrote:\n> I cleaned up a few things and attached v34 so you can do that if you\nlike.\n\nOf course, \"clean\" is a relative term. While making a small bit of progress\nworking in tidbitmap.c earlier this week, I thought it useful to prototype\nsome things in the tidstore, at which point I was reminded it no longer\ncompiles because of my recent work. 
I put in the necessary incantations so\nthat the v32 tidstore compiles and passes tests, so here's a patchset for\nthat (but no vacuum changes). I thought it was a good time to also condense\nit down to look more similar to previous patches, as a basis for future\nwork.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Fri, 23 Jun 2023 16:54:21 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jun 23, 2023 at 6:54 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> I wrote:\n> > I cleaned up a few things and attached v34 so you can do that if you like.\n>\n> Of course, \"clean\" is a relative term. While making a small bit of progress working in tidbitmap.c earlier this week, I thought it useful to prototype some things in the tidstore, at which point I was reminded it no longer compiles because of my recent work. I put in the necessary incantations so that the v32 tidstore compiles and passes tests, so here's a patchset for that (but no vacuum changes). I thought it was a good time to also condense it down to look more similar to previous patches, as a basis for future work.\n>\n\nThank you for updating the patch set. I'll look at updates closely\nearly next week.\n\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 27 Jun 2023 17:20:03 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jun 27, 2023 at 5:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Jun 23, 2023 at 6:54 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> > I wrote:\n> > > I cleaned up a few things and attached v34 so you can do that if you like.\n> >\n> > Of course, \"clean\" is a relative term. 
While making a small bit of progress working in tidbitmap.c earlier this week, I thought it useful to prototype some things in the tidstore, at which point I was reminded it no longer compiles because of my recent work. I put in the necessary incantations so that the v32 tidstore compiles and passes tests, so here's a patchset for that (but no vacuum changes). I thought it was a good time to also condense it down to look more similar to previous patches, as a basis for future work.\n> >\n>\n> Thank you for updating the patch set. I'll look at updates closely\n> early next week.\n>\n\nI've run several benchmarks for v32, where before your recent change\nstarting, and v35 patch. Overall the numbers are better than the\nprevious version. Here is the test result where I used 1-byte value:\n\n\"select * from bench_load_random(10_000_000)\"\n\n* v35\n radix tree leaves: 192 total in 0 blocks; 0 empty blocks; 0 free (0\nchunks); 192 used\n radix tree node 256: 13697472 total in 205 blocks; 0 empty blocks;\n52400 free (25 chunks); 13645072 used\n radix tree node 125: 86630592 total in 2115 blocks; 0 empty blocks;\n7859376 free (6102 chunks); 78771216 used\n radix tree node 32: 94912 total in 0 blocks; 10 empty blocks; 0 free\n(0 chunks); 94912 used\n radix tree node 15: 9269952 total in 1136 blocks; 0 empty blocks;\n168 free (1 chunks); 9269784 used\n radix tree node 3: 1915502784 total in 233826 blocks; 0 empty\nblocks; 6560 free (164 chunks); 1915496224 used\n mem_allocated | load_ms\n---------------+---------\n 2025194752 | 3011\n(1 row)\n\n* v32\n radix tree node 256: 192 total in 0 blocks; 0 empty blocks; 0 free\n(0 chunks); 192 used\n radix tree node 256: 13487552 total in 205 blocks; 0 empty blocks;\n51600 free (25 chunks); 13435952 used\n radix tree node 125: 192 total in 0 blocks; 0 empty blocks; 0 free\n(0 chunks); 192 used\n radix tree node 125: 86630592 total in 2115 blocks; 0 empty blocks;\n7859376 free (6102 chunks); 78771216 used\n radix tree node 32: 192 
total in 0 blocks; 0 empty blocks; 0 free (0\nchunks); 192 used\n radix tree node 32: 94912 total in 0 blocks; 10 empty blocks; 0 free\n(0 chunks); 94912 used\n radix tree node 15: 192 total in 0 blocks; 0 empty blocks; 0 free (0\nchunks); 192 used\n radix tree node 15: 9269952 total in 1136 blocks; 0 empty blocks;\n168 free (1 chunks); 9269784 used\n radix tree node 3: 241597002 total in 29499 blocks; 0 empty blocks;\n3864 free (161 chunks); 241593138 used\n radix tree node 3: 1809039552 total in 221696 blocks; 0 empty\nblocks; 5280 free (110 chunks); 1809034272 used\n mem_allocated | load_ms\n---------------+---------\n 2160118410 | 3069\n(1 row)\n\nAs you mentioned, the 1-byte value is embedded into 8 byte so 7 bytes\nare unused, but we use less memory since we use less slab contexts and\nsave fragmentations.\n\nI've also tested some large value cases (e.g. the value is 80-bytes)\nand got a similar result.\n\nRegarding the codes, there are many todo and fixme comments so it\nseems to me that your recent work is still in-progress. What is the\ncurrent status? Can I start reviewing the code or should I wait for a\nwhile until your recent work completes?\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 4 Jul 2023 14:48:39 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jul 4, 2023 at 12:49 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> As you mentioned, the 1-byte value is embedded into 8 byte so 7 bytes\n> are unused, but we use less memory since we use less slab contexts and\n> save fragmentations.\n\nThanks for testing. This tree is sparse enough that most of the space is\ntaken up by small inner nodes, and not by leaves. So, it's encouraging to\nsee a small space savings even here.\n\n> I've also tested some large value cases (e.g. 
the value is 80-bytes)\n> and got a similar result.\n\nInteresting. With a separate allocation per value the overhead would be 8\nbytes, or 10% here. It's plausible that savings elsewhere can hide that,\nglobally.\n\n> Regarding the codes, there are many todo and fixme comments so it\n> seems to me that your recent work is still in-progress. What is the\n> current status? Can I start reviewing the code or should I wait for a\n> while until your recent work completes?\n\nWell, it's going to be a bit of a mess until I can demonstrate it working\n(and working well) with bitmap heap scan. Fixing that now is just going to\ncreate conflicts. I do have a couple small older patches laying around that\nwere quick experiments -- I think at least some of them should give a\nperformance boost in loading speed, but haven't had time to test. Would you\nlike to take a look?\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Tue, Jul 4, 2023 at 12:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> As you mentioned, the 1-byte value is embedded into 8 byte so 7 bytes> are unused, but we use less memory since we use less slab contexts and> save fragmentations.Thanks for testing. This tree is sparse enough that most of the space is taken up by small inner nodes, and not by leaves. So, it's encouraging to see a small space savings even here.> I've also tested some large value cases (e.g. the value is 80-bytes)> and got a similar result.Interesting. With a separate allocation per value the overhead would be 8 bytes, or 10% here. It's plausible that savings elsewhere can hide that, globally.> Regarding the codes, there are many todo and fixme comments so it> seems to me that your recent work is still in-progress. What is the> current status? Can I start reviewing the code or should I wait for a> while until your recent work completes?Well, it's going to be a bit of a mess until I can demonstrate it working (and working well) with bitmap heap scan. 
Fixing that now is just going to create conflicts. I do have a couple small older patches laying around that were quick experiments -- I think at least some of them should give a performance boost in loading speed, but haven't had time to test. Would you like to take a look?--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 5 Jul 2023 18:21:20 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jul 5, 2023 at 8:21 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n> On Tue, Jul 4, 2023 at 12:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > As you mentioned, the 1-byte value is embedded into 8 byte so 7 bytes\n> > are unused, but we use less memory since we use less slab contexts and\n> > save fragmentations.\n>\n> Thanks for testing. This tree is sparse enough that most of the space is taken up by small inner nodes, and not by leaves. So, it's encouraging to see a small space savings even here.\n>\n> > I've also tested some large value cases (e.g. the value is 80-bytes)\n> > and got a similar result.\n>\n> Interesting. With a separate allocation per value the overhead would be 8 bytes, or 10% here. It's plausible that savings elsewhere can hide that, globally.\n>\n> > Regarding the codes, there are many todo and fixme comments so it\n> > seems to me that your recent work is still in-progress. What is the\n> > current status? Can I start reviewing the code or should I wait for a\n> > while until your recent work completes?\n>\n> Well, it's going to be a bit of a mess until I can demonstrate it working (and working well) with bitmap heap scan. Fixing that now is just going to create conflicts. I do have a couple small older patches laying around that were quick experiments -- I think at least some of them should give a performance boost in loading speed, but haven't had time to test. 
Would you like to take a look?\n\nYes, I can experiment with these patches in the meantime.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 7 Jul 2023 16:18:40 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jul 7, 2023 at 2:19 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> On Wed, Jul 5, 2023 at 8:21 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n> > Well, it's going to be a bit of a mess until I can demonstrate it\nworking (and working well) with bitmap heap scan. Fixing that now is just\ngoing to create conflicts. I do have a couple small older patches laying\naround that were quick experiments -- I think at least some of them should\ngive a performance boost in loading speed, but haven't had time to test.\nWould you like to take a look?\n>\n> Yes, I can experiment with these patches in the meantime.\n\nOkay, here it is in v36. 0001-6 are same as v35.\n\n0007 removes a wasted extra computation newly introduced by refactoring\ngrowing nodes. 0008 just makes 0011 nicer. Not worth testing by themselves,\nbut better to be tidy.\n0009 is an experiment to get rid of slow memmoves in node4, addressing a\nlong-standing inefficiency. It looks a bit tricky, but I think it's\nactually straightforward after drawing out the cases with pen and paper. It\nworks if the fanout is either 4 or 5, so we have some wiggle room. This may\ngive a noticeable boost if the input is reversed or random.\n0010 allows RT_EXTEND_DOWN to reduce function calls, so should help with\nsparse trees.\n0011 reduces function calls when growing the smaller nodes. Not sure about\nthis one -- possibly worth it for node4 only?\n\nIf these help, it'll show up more easily in smaller inputs. 
Large inputs\ntend to be more dominated by RAM latency.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Sat, 8 Jul 2023 09:54:19 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sat, Jul 8, 2023 at 11:54 AM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Fri, Jul 7, 2023 at 2:19 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Jul 5, 2023 at 8:21 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> > > Well, it's going to be a bit of a mess until I can demonstrate it working (and working well) with bitmap heap scan. Fixing that now is just going to create conflicts. I do have a couple small older patches laying around that were quick experiments -- I think at least some of them should give a performance boost in loading speed, but haven't had time to test. Would you like to take a look?\n> >\n> > Yes, I can experiment with these patches in the meantime.\n>\n> Okay, here it is in v36. 0001-6 are same as v35.\n>\n> 0007 removes a wasted extra computation newly introduced by refactoring growing nodes. 0008 just makes 0011 nicer. Not worth testing by themselves, but better to be tidy.\n> 0009 is an experiment to get rid of slow memmoves in node4, addressing a long-standing inefficiency. It looks a bit tricky, but I think it's actually straightforward after drawing out the cases with pen and paper. It works if the fanout is either 4 or 5, so we have some wiggle room. This may give a noticeable boost if the input is reversed or random.\n> 0010 allows RT_EXTEND_DOWN to reduce function calls, so should help with sparse trees.\n> 0011 reduces function calls when growing the smaller nodes. Not sure about this one -- possibly worth it for node4 only?\n>\n> If these help, it'll show up more easily in smaller inputs. 
Large inputs tend to be more dominated by RAM latency.\n\nThanks for sharing the patches!\n\n0007, 0008, 0010, and 0011 are straightforward and agree to merge them.\n\nI have some questions on 0009 patch:\n\n+ /* shift chunks and children\n+\n+ Unfortunately, gcc has gotten too aggressive in\nturning simple loops\n+ into slow memmove's, so we have to be a bit more clever.\n+ See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101481\n+\n+ We take advantage of the fact that a good\n+ compiler can turn a memmove of a small constant power-of-two\n+ number of bytes into a single load/store.\n+ */\n\nAccording to the comment, this optimization is for only gcc? and there\nis no negative impact when building with other compilers such as clang\nby this change?\n\nI'm not sure that it's a good approach to hand-optimize the code much\nto generate better instructions on gcc. I think this change reduces\nreadability and maintainability. According to the bugzilla ticket\nreferred to in the comment, it's realized as a bug in the community,\nso once the gcc bug fixes, we might no longer need this trick, no?\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 13 Jul 2023 17:08:38 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn Thu, Jul 13, 2023 at 5:08 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Sat, Jul 8, 2023 at 11:54 AM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> >\n> >\n> > On Fri, Jul 7, 2023 at 2:19 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Wed, Jul 5, 2023 at 8:21 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> > > > Well, it's going to be a bit of a mess until I can demonstrate it working (and working well) with bitmap heap scan. Fixing that now is just going to create conflicts. 
I do have a couple small older patches laying around that were quick experiments -- I think at least some of them should give a performance boost in loading speed, but haven't had time to test. Would you like to take a look?\n> > >\n> > > Yes, I can experiment with these patches in the meantime.\n> >\n> > Okay, here it is in v36. 0001-6 are same as v35.\n> >\n> > 0007 removes a wasted extra computation newly introduced by refactoring growing nodes. 0008 just makes 0011 nicer. Not worth testing by themselves, but better to be tidy.\n> > 0009 is an experiment to get rid of slow memmoves in node4, addressing a long-standing inefficiency. It looks a bit tricky, but I think it's actually straightforward after drawing out the cases with pen and paper. It works if the fanout is either 4 or 5, so we have some wiggle room. This may give a noticeable boost if the input is reversed or random.\n> > 0010 allows RT_EXTEND_DOWN to reduce function calls, so should help with sparse trees.\n> > 0011 reduces function calls when growing the smaller nodes. Not sure about this one -- possibly worth it for node4 only?\n> >\n> > If these help, it'll show up more easily in smaller inputs. Large inputs tend to be more dominated by RAM latency.\n\ncfbot reported some failures[1], and the v36 patch cannot be applied\ncleanly to the current HEAD. I've attached updated patches to make\ncfbot happy.\n\nRegards,\n\n[1] http://cfbot.cputube.org/highlights/all.html#3687\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 1 Aug 2023 17:12:05 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jul 13, 2023 at 3:09 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> 0007, 0008, 0010, and 0011 are straightforward and agree to merge them.\n\n[Part 1 - clear the deck of earlier performance work etc]\n\nThanks for taking a look! 
I've merged 0007 and 0008. The others need a\nperformance test to justify them -- an eyeball check is not enough. I've\nnow made the time to do that.\n\n==== sparse loads\n\nv38 0001-0006 (still using node3 for this test only):\n\nselect avg(load_ms) from generate_series(1,100) x(x), lateral (select *\nfrom bench_load_random_int(100 * 1000 * (1+x-x))) a;\n avg\n---------------------\n 27.1000000000000000\n\nselect avg(load_ms) from generate_series(1,30) x(x), lateral (select * from\nbench_load_random_int(500 * 1000 * (1+x-x))) a;\n avg\n----------------------\n 165.6333333333333333\n\nv38-0007-Optimize-RT_EXTEND_DOWN.patch\n\nselect avg(load_ms) from generate_series(1,100) x(x), lateral (select *\nfrom bench_load_random_int(100 * 1000 * (1+x-x))) a;\n avg\n---------------------\n 25.0900000000000000\n\nselect avg(load_ms) from generate_series(1,30) x(x), lateral (select * from\nbench_load_random_int(500 * 1000 * (1+x-x))) a;\n avg\n----------------------\n 157.3666666666666667\n\nThat seems worth doing.\n\nv38-0008-Use-4-children-for-node-4-also-attempt-portable-.patch\n\nThis combines two things because I messed up a rebase: Use fanout of 4, and\ntry some macros for shmem sizes, both 32- and 64-bit. Looking at this much,\nI no longer have a goal to have a separate set of size-classes for non-SIMD\nplatforms, because that would cause global maintenance problems -- it's\nprobably better to reduce worst-case search time where necessary. That\nwould be much more localized.\n\n> I have some questions on 0009 patch:\n\n> According to the comment, this optimization is for only gcc?\n\nNo, not at all. That tells me the comment is misleading.\n\n> I think this change reduces\n> readability and maintainability.\n\nWell, that much is obvious. What is not obvious is how much it gains us\nover the alternatives. 
I do have a simpler idea, though...\n\n==== load mostly node4\n\nselect * from bench_search_random_nodes(250*1000, '0xFFFFFF');\nn4 = 42626, n16 = 21492, n32 = 0, n64 = 0, n256 = 257\n mem_allocated | load_ms | search_ms\n---------------+---------+-----------\n 7352384 | 25 | 0\n\nv38-0009-TEMP-take-out-search-time-from-bench.patch\n\nThis is just to allow LATERAL queries for better measurements.\n\nselect avg(load_ms) from generate_series(1,100) x(x), lateral (select *\nfrom bench_search_random_nodes(250*1000 * (1+x-x), '0xFFFFFF')) a;\n\n avg\n---------------------\n 24.8333333333333333\n\nv38-0010-Try-a-simpler-way-to-avoid-memmove.patch\n\nThis slightly rewrites the standard loop so that gcc doesn't turn it into a\nmemmove(). Unlike the patch you didn't like, this *is* gcc-specific. (needs\na comment, which I forgot)\n\n avg\n---------------------\n 21.9600000000000000\n\nSo, that's not a trivial difference. I wasn't a big fan of Andres'\n__asm(\"\") workaround, but that may be just my ignorance about it. We need\nsomething like either of the two.\n\nv38-0011-Optimize-add_child_4-take-2.patch\n avg\n---------------------\n 21.3500000000000000\n\nThis is possibly faster than v38-0010, but looking like not worth the\ncomplexity, assuming the other way avoids the bug going forward.\n\n> According to the bugzilla ticket\n> referred to in the comment, it's realized as a bug in the community,\n> so once the gcc bug fixes, we might no longer need this trick, no?\n\nNo comment in two years...\n\nv38-0013-Use-constant-for-initial-copy-of-chunks-and-chil.patch\n\nThis is the same as v37-0011. 
I wasn't quite satisfied with it since it\nstill has two memcpy() calls, but it actually seems to regress:\n\n avg\n---------------------\n 22.0900000000000000\n\nv38-0012-Use-branch-free-coding-to-skip-new-element-index.patch\n\nThis patch uses a single loop for the copy.\n\n avg\n---------------------\n 21.0300000000000000\n\nWithin noise level of v38-0011, but it's small and simple, so I like it, at\nleast for small arrays.\n\nv38-0014-node48-Remove-need-for-RIGHTMOST_ONE-in-radix-tr.patch\nv38-0015-node48-Remove-dead-code-by-using-loop-local-var.patch\n\nJust small cleanups.\n\nv38-0016-Use-memcpy-for-children-when-growing-into-node48.patch\n\nMakes sense, but untested.\n\n===============\n[Part 2]\n\nPer off-list discussion with Masahiko, it makes sense to take some of the\nideas I've used locally on tidbitmap, and start incorporating them into\nearlier vacuum work to get that out the door faster. With that in mind...\n\nv38-0017-Make-tidstore-more-similar-to-tidbitmap.patch\n\nThis uses a simplified PagetableEntry (unimaginatively called\nBlocktableEntry just to avoid confusion), to be replaced with the real\nthing at a later date. This is still fixed size, to be replaced with a\nvarlen type.\n\nLooking at the tidstore tests again after some months, I'm not particularly\npleased with the amount of code required for how little it seems to be\ntesting, nor the output when something fails. (I wonder how hard it would\nbe to have SQL functions that add blocks/offsets to the tid store, and emit\ntuples of tids found in the store.)\n\nI'm also concerned about the number of places that have to know if the\nstore is using shared memory or not. Something to think about later.\n\nv38-0018-Consolidate-inserting-updating-values.patch\n\nThis is something I coded up to get to an API more similar to one in\nsimplehash, as used in tidbitmap.c. 
It seem worth doing on its own to\nreduce code duplication, and also simplifies coding of varlen types and\n\"runtime-embeddable values\".\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Mon, 14 Aug 2023 18:05:21 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn Mon, Aug 14, 2023 at 8:05 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Thu, Jul 13, 2023 at 3:09 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > 0007, 0008, 0010, and 0011 are straightforward and agree to merge them.\n\nThank you for updating the patch!\n\n>\n> [Part 1 - clear the deck of earlier performance work etc]\n>\n> Thanks for taking a look! I've merged 0007 and 0008. The others need a performance test to justify them -- an eyeball check is not enough. I've now made the time to do that.\n>\n> ==== sparse loads\n>\n> v38 0001-0006 (still using node3 for this test only):\n>\n> select avg(load_ms) from generate_series(1,100) x(x), lateral (select * from bench_load_random_int(100 * 1000 * (1+x-x))) a;\n> avg\n> ---------------------\n> 27.1000000000000000\n>\n> select avg(load_ms) from generate_series(1,30) x(x), lateral (select * from bench_load_random_int(500 * 1000 * (1+x-x))) a;\n> avg\n> ----------------------\n> 165.6333333333333333\n>\n> v38-0007-Optimize-RT_EXTEND_DOWN.patch\n>\n> select avg(load_ms) from generate_series(1,100) x(x), lateral (select * from bench_load_random_int(100 * 1000 * (1+x-x))) a;\n> avg\n> ---------------------\n> 25.0900000000000000\n>\n> select avg(load_ms) from generate_series(1,30) x(x), lateral (select * from bench_load_random_int(500 * 1000 * (1+x-x))) a;\n> avg\n> ----------------------\n> 157.3666666666666667\n>\n> That seems worth doing.\n>\n> v38-0008-Use-4-children-for-node-4-also-attempt-portable-.patch\n>\n> This combines two things because I messed up a rebase: Use fanout 
of 4, and try some macros for shmem sizes, both 32- and 64-bit. Looking at this much, I no longer have a goal to have a separate set of size-classes for non-SIMD platforms, because that would cause global maintenance problems -- it's probably better to reduce worst-case search time where necessary. That would be much more localized.\n>\n> > I have some questions on 0009 patch:\n>\n> > According to the comment, this optimization is for only gcc?\n>\n> No, not at all. That tells me the comment is misleading.\n>\n> > I think this change reduces\n> > readability and maintainability.\n>\n> Well, that much is obvious. What is not obvious is how much it gains us over the alternatives. I do have a simpler idea, though...\n>\n> ==== load mostly node4\n>\n> select * from bench_search_random_nodes(250*1000, '0xFFFFFF');\n> n4 = 42626, n16 = 21492, n32 = 0, n64 = 0, n256 = 257\n> mem_allocated | load_ms | search_ms\n> ---------------+---------+-----------\n> 7352384 | 25 | 0\n>\n> v38-0009-TEMP-take-out-search-time-from-bench.patch\n>\n> This is just to allow LATERAL queries for better measurements.\n>\n> select avg(load_ms) from generate_series(1,100) x(x), lateral (select * from bench_search_random_nodes(250*1000 * (1+x-x), '0xFFFFFF')) a;\n>\n> avg\n> ---------------------\n> 24.8333333333333333\n\n0007, 0008, and 0009 look good to me.\n\n>\n> v38-0010-Try-a-simpler-way-to-avoid-memmove.patch\n>\n> This slightly rewrites the standard loop so that gcc doesn't turn it into a memmove(). Unlike the patch you didn't like, this *is* gcc-specific. (needs a comment, which I forgot)\n>\n> avg\n> ---------------------\n> 21.9600000000000000\n>\n> So, that's not a trivial difference. I wasn't a big fan of Andres' __asm(\"\") workaround, but that may be just my ignorance about it. 
We need something like either of the two.\n>\n> v38-0011-Optimize-add_child_4-take-2.patch\n> avg\n> ---------------------\n> 21.3500000000000000\n>\n> This is possibly faster than v38-0010, but looking like not worth the complexity, assuming the other way avoids the bug going forward.\n\nI prefer 0010 but is it worth testing with other compilers such as clang?\n\n>\n> > According to the bugzilla ticket\n> > referred to in the comment, it's realized as a bug in the community,\n> > so once the gcc bug fixes, we might no longer need this trick, no?\n>\n> No comment in two years...\n>\n> v38-0013-Use-constant-for-initial-copy-of-chunks-and-chil.patch\n>\n> This is the same as v37-0011. I wasn't quite satisfied with it since it still has two memcpy() calls, but it actually seems to regress:\n>\n> avg\n> ---------------------\n> 22.0900000000000000\n>\n> v38-0012-Use-branch-free-coding-to-skip-new-element-index.patch\n>\n> This patch uses a single loop for the copy.\n>\n> avg\n> ---------------------\n> 21.0300000000000000\n>\n> Within noise level of v38-0011, but it's small and simple, so I like it, at least for small arrays.\n\nAgreed.\n\n>\n> v38-0014-node48-Remove-need-for-RIGHTMOST_ONE-in-radix-tr.patch\n> v38-0015-node48-Remove-dead-code-by-using-loop-local-var.patch\n>\n> Just small cleanups.\n>\n> v38-0016-Use-memcpy-for-children-when-growing-into-node48.patch\n>\n> Makes sense, but untested.\n\nAgreed.\n\nBTW cfbot reported that some regression tests failed due to OOM. I've\nattached the patch to fix it.\n\n>\n> ===============\n> [Part 2]\n>\n> Per off-list discussion with Masahiko, it makes sense to take some of the ideas I've used locally on tidbitmap, and start incorporating them into earlier vacuum work to get that out the door faster. 
With that in mind...\n>\n> v38-0017-Make-tidstore-more-similar-to-tidbitmap.patch\n>\n> This uses a simplified PagetableEntry (unimaginatively called BlocktableEntry just to avoid confusion), to be replaced with the real thing at a later date. This is still fixed size, to be replaced with a varlen type.\n\nThat's more readable.\n\n>\n> Looking at the tidstore tests again after some months, I'm not particularly pleased with the amount of code required for how little it seems to be testing, nor the output when something fails. (I wonder how hard it would be to have SQL functions that add blocks/offsets to the tid store, and emit tuples of tids found in the store.)\n\nIt would not be hard to have such SQL functions. I'll try it.\n\n>\n> I'm also concerned about the number of places that have to know if the store is using shared memory or not. Something to think about later.\n>\n> v38-0018-Consolidate-inserting-updating-values.patch\n>\n> This is something I coded up to get to an API more similar to one in simplehash, as used in tidbitmap.c. It seem worth doing on its own to reduce code duplication, and also simplifies coding of varlen types and \"runtime-embeddable values\".\n\nAgreed.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 15 Aug 2023 11:33:38 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Aug 15, 2023 at 9:34 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n\n> BTW cfbot reported that some regression tests failed due to OOM. I've\n> attached the patch to fix it.\n\nSeems worth doing now rather than later, so added this and squashed most of\nthe rest together. 
I wonder if that test uses too much memory in general.\nMaybe using the full uint64 is too much.\n\n> On Mon, Aug 14, 2023 at 8:05 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n\n> > This is possibly faster than v38-0010, but looking like not worth the\ncomplexity, assuming the other way avoids the bug going forward.\n>\n> I prefer 0010 but is it worth testing with other compilers such as clang?\n\nOkay, keeping 0010 with a comment, and leaving out 0011 for now. Clang is\naggressive about unrolling loops, so may be worth looking globally at some\npoint.\n\n> > v38-0012-Use-branch-free-coding-to-skip-new-element-index.patch\n\n> > Within noise level of v38-0011, but it's small and simple, so I like\nit, at least for small arrays.\n>\n> Agreed.\n\nKeeping 0012 and not 0013.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Tue, 15 Aug 2023 18:53:05 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Aug 15, 2023 at 6:53 PM John Naylor <john.naylor@enterprisedb.com>\nwrote:\n>\n> On Tue, Aug 15, 2023 at 9:34 AM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> > BTW cfbot reported that some regression tests failed due to OOM. 
I've\n> > attached the patch to fix it.\n>\n> Seems worth doing now rather than later, so added this and squashed most\nof the rest together.\n\nThis segfaults because of a mistake fixing a rebase conflict, so v40\nattached.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com", "msg_date": "Wed, 16 Aug 2023 18:04:34 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Aug 16, 2023 at 8:04 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Tue, Aug 15, 2023 at 6:53 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n> >\n> > On Tue, Aug 15, 2023 at 9:34 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > BTW cfbot reported that some regression tests failed due to OOM. I've\n> > > attached the patch to fix it.\n> >\n> > Seems worth doing now rather than later, so added this and squashed most of the rest together.\n>\n> This segfaults because of a mistake fixing a rebase conflict, so v40 attached.\n>\n\nThank you for updating the patch set.\n\nOn Tue, Aug 15, 2023 at 11:33 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> On Mon, Aug 14, 2023 at 8:05 PM John Naylor\n> <john.naylor@enterprisedb.com> wrote:\n> > Looking at the tidstore tests again after some months, I'm not particularly pleased with the amount of code required for how little it seems to be testing, nor the output when something fails. (I wonder how hard it would be to have SQL functions that add blocks/offsets to the tid store, and emit tuples of tids found in the store.)\n>\n> It would not be hard to have such SQL functions. I'll try it.\n\nI've updated the regression tests for tidstore so that it uses SQL\nfunctions to add blocks/offsets and dump its contents. 
The new test\ncovers the same test coverages but it's executed using SQL functions\ninstead of executing all tests in one SQL function.\n\n0008 patch fixes a bug in tidstore which I found during this work. We\ndidn't recreate the radix tree in the same memory context when\nTidStoreReset().\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Sun, 27 Aug 2023 21:53:00 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sun, Aug 27, 2023 at 7:53 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> I've updated the regression tests for tidstore so that it uses SQL\n> functions to add blocks/offsets and dump its contents. The new test\n> covers the same test coverages but it's executed using SQL functions\n> instead of executing all tests in one SQL function.\n\nThis is much nicer and more flexible, thanks! A few questions/comments:\n\ntidstore_dump_tids() returns a string -- is it difficult to turn this into\na SRF, or is it just a bit more work?\n\nThe lookup test seems fine for now. The output would look nicer with an\n\"order by tid\".\n\nI think we could have the SQL function tidstore_create() take a boolean for\nshared memory. 
That would allow ad-hoc testing without a recompile, if I'm\nnot mistaken.\n\n+SELECT tidstore_set_block_offsets(blk, array_agg(offsets.off)::int2[])\n+ FROM blocks, offsets\n+ GROUP BY blk;\n+ tidstore_set_block_offsets\n+----------------------------\n+\n+\n+\n+\n+\n+(5 rows)\n\nCalling a void function multiple times leads to vertical whitespace, which\nlooks a bit strange and may look better with some output, even if\nirrelevant:\n\n-SELECT tidstore_set_block_offsets(blk, array_agg(offsets.off)::int2[])\n+SELECT row_number() over(order by blk), tidstore_set_block_offsets(blk,\narray_agg(offsets.off)::int2[])\n\n row_number | tidstore_set_block_offsets\n------------+----------------------------\n 1 |\n 2 |\n 3 |\n 4 |\n 5 |\n(5 rows)\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Sun, Aug 27, 2023 at 7:53 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> I've updated the regression tests for tidstore so that it uses SQL> functions to add blocks/offsets and dump its contents. The new test> covers the same test coverages but it's executed using SQL functions> instead of executing all tests in one SQL function.This is much nicer and more flexible, thanks! A few questions/comments:tidstore_dump_tids() returns a string -- is it difficult to turn this into a SRF, or is it just a bit more work?The lookup test seems fine for now. The output would look nicer with an \"order by tid\".I think we could have the SQL function tidstore_create() take a boolean for shared memory. 
That would allow ad-hoc testing without a recompile, if I'm not mistaken.+SELECT tidstore_set_block_offsets(blk, array_agg(offsets.off)::int2[])+  FROM blocks, offsets+  GROUP BY blk;+ tidstore_set_block_offsets+----------------------------++++++(5 rows)Calling a void function multiple times leads to vertical whitespace, which looks a bit strange and may look better with some output, even if irrelevant:-SELECT tidstore_set_block_offsets(blk, array_agg(offsets.off)::int2[])+SELECT row_number() over(order by blk), tidstore_set_block_offsets(blk, array_agg(offsets.off)::int2[]) row_number | tidstore_set_block_offsets------------+----------------------------          1 |          2 |          3 |          4 |          5 |(5 rows)--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Mon, 28 Aug 2023 14:19:50 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Aug 28, 2023 at 4:20 PM John Naylor\n<john.naylor@enterprisedb.com> wrote:\n>\n> On Sun, Aug 27, 2023 at 7:53 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I've updated the regression tests for tidstore so that it uses SQL\n> > functions to add blocks/offsets and dump its contents. The new test\n> > covers the same test coverages but it's executed using SQL functions\n> > instead of executing all tests in one SQL function.\n>\n> This is much nicer and more flexible, thanks! A few questions/comments:\n>\n> tidstore_dump_tids() returns a string -- is it difficult to turn this into a SRF, or is it just a bit more work?\n\nIt's not difficult. I've changed it in v42 patch.\n\n>\n> The lookup test seems fine for now. The output would look nicer with an \"order by tid\".\n\nAgreed.\n\n>\n> I think we could have the SQL function tidstore_create() take a boolean for shared memory. 
That would allow ad-hoc testing without a recompile, if I'm not mistaken.\n\nAgreed.\n\n>\n> +SELECT tidstore_set_block_offsets(blk, array_agg(offsets.off)::int2[])\n> + FROM blocks, offsets\n> + GROUP BY blk;\n> + tidstore_set_block_offsets\n> +----------------------------\n> +\n> +\n> +\n> +\n> +\n> +(5 rows)\n>\n> Calling a void function multiple times leads to vertical whitespace, which looks a bit strange and may look better with some output, even if irrelevant:\n>\n> -SELECT tidstore_set_block_offsets(blk, array_agg(offsets.off)::int2[])\n> +SELECT row_number() over(order by blk), tidstore_set_block_offsets(blk, array_agg(offsets.off)::int2[])\n>\n> row_number | tidstore_set_block_offsets\n> ------------+----------------------------\n> 1 |\n> 2 |\n> 3 |\n> 4 |\n> 5 |\n> (5 rows)\n\nYes, it looks better.\n\nI've attached v42 patch set. I improved tidstore regression test codes\nin addition of imcorporating the above comments.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 28 Aug 2023 23:43:22 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Aug 28, 2023 at 9:44 PM Masahiko Sawada <sawada.mshk@gmail.com>\nwrote:\n>\n> I've attached v42 patch set. I improved tidstore regression test codes\n> in addition of imcorporating the above comments.\n\nSeems fine at a glance, thanks. I will build on this to implement\nvariable-length values. I have already finished one prerequisite which is:\npublic APIs passing pointers to values.\n\n--\nJohn Naylor\nEDB: http://www.enterprisedb.com\n\nOn Mon, Aug 28, 2023 at 9:44 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:>> I've attached v42 patch set. I improved tidstore regression test codes> in addition of imcorporating the above comments.Seems fine at a glance, thanks. I will build on this to implement variable-length values. 
I have already finished one prerequisite which is: public APIs passing pointers to values.--John NaylorEDB: http://www.enterprisedb.com", "msg_date": "Wed, 6 Sep 2023 13:23:43 +0700", "msg_from": "John Naylor <john.naylor@enterprisedb.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Sep 6, 2023 at 3:23 PM John Naylor <john.naylor@enterprisedb.com> wrote:\n>\n>\n> On Mon, Aug 28, 2023 at 9:44 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I've attached v42 patch set. I improved tidstore regression test codes\n> > in addition of imcorporating the above comments.\n>\n> Seems fine at a glance, thanks. I will build on this to implement variable-length values.\n\nThanks.\n\n> I have already finished one prerequisite which is: public APIs passing pointers to values.\n\nGreat!\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 6 Sep 2023 21:54:56 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2023-08-28 23:43:22 +0900, Masahiko Sawada wrote:\n> I've attached v42 patch set. I improved tidstore regression test codes\n> in addition of imcorporating the above comments.\n\nWhy did you need to disable the benchmark module for CI?\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Fri, 15 Sep 2023 17:03:12 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sat, Sep 16, 2023 at 9:03 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2023-08-28 23:43:22 +0900, Masahiko Sawada wrote:\n> > I've attached v42 patch set. 
I improved tidstore regression test codes\n> > in addition of imcorporating the above comments.\n>\n> Why did you need to disable the benchmark module for CI?\n\nI didn't want to unnecessarily make cfbot unhappy since the benchmark\nmodule is not going to get committed to the core and sometimes not\nup-to-date.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Sun, 17 Sep 2023 12:21:47 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I wrote:\n\n> Seems fine at a glance, thanks. I will build on this to implement variable-length values. I have already finished one prerequisite which is: public APIs passing pointers to values.\n\nSince my publishing schedule has not kept up, I'm just going to share\nsomething similar to what I mentioned earlier, just to get things\nmoving again.\n\n0001-0009 are from earlier versions, except for 0007 which makes a\nbunch of superficial naming updates, similar to those done in a recent\nother version. Somewhere along the way I fixed long-standing git\nwhitespace warnings, but I don't remember if that's new here. In any\ncase, let's try to preserve that.\n\n0010 is some minor refactoring to reduce duplication\n\n0011-0014 add public functions that give the caller more control over\nthe input and responsibility for locking. They are not named well, but\nI plan these to be temporary: They are currently used for the tidstore\nonly, since that has much simpler tests than the standard radix tree\ntests. One thing to note: since the tidstore has always done it's own\nlocking within a larger structure, these patches don't bother to do\nlocking at the radix tree level. 
Locking twice seems...not great.\nThese patches are the main prerequisite for variable-length values.\nOnce that is working well, we can switch the standard tests to the new\nAPIs.\n\nNext steps include (some of these were briefly discussed off-list with\nSawada-san):\n\n- template parameter for varlen values\n- some callers to pass length in bytes\n- block entries to have num_elems for # of bitmap words\n- a way for updates to re-alloc values when needed\n- aset allocation for values when appropriate", "msg_date": "Sat, 28 Oct 2023 15:56:42 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sat, Oct 28, 2023 at 5:56 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> I wrote:\n>\n> > Seems fine at a glance, thanks. I will build on this to implement variable-length values. I have already finished one prerequisite which is: public APIs passing pointers to values.\n>\n> Since my publishing schedule has not kept up, I'm just going to share\n> something similar to what I mentioned earlier, just to get things\n> moving again.\n\nThanks for sharing the updates. I've returned to work today and will\nresume working on this feature.\n\n>\n> 0001-0009 are from earlier versions, except for 0007 which makes a\n> bunch of superficial naming updates, similar to those done in a recent\n> other version. Somewhere along the way I fixed long-standing git\n> whitespace warnings, but I don't remember if that's new here. In any\n> case, let's try to preserve that.\n>\n> 0010 is some minor refactoring to reduce duplication\n>\n> 0011-0014 add public functions that give the caller more control over\n> the input and responsibility for locking. They are not named well, but\n> I plan these to be temporary: They are currently used for the tidstore\n> only, since that has much simpler tests than the standard radix tree\n> tests. 
One thing to note: since the tidstore has always done it's own\n> locking within a larger structure, these patches don't bother to do\n> locking at the radix tree level. Locking twice seems...not great.\n> These patches are the main prerequisite for variable-length values.\n> Once that is working well, we can switch the standard tests to the new\n> APIs.\n\nSince the variable-length values support is a big deal and would be\nrelated to API design I'd like to discuss the API design first.\nCurrently, we have the following APIs:\n\n---\nRT_VALUE_TYPE\nRT_GET(RT_RADIX_TREE *tree, uint64 key, bool *found);\nor for variable-length value support,\nRT_GET(RT_RADIX_TREE *tree, uint64 key, size_t sz, bool *found);\n\nIf an entry already exists, return its pointer and set \"found\" to\ntrue. Otherwize, insert an empty value with sz bytes, return its\npointer, and set \"found\" to false.\n\n---\nRT_VALUE_TYPE\nRT_FIND(RT_RADIX_TREE *tree, uint64 key);\n\nIf an entry exists, return the pointer to the value, otherwise return NULL.\n\n(I omitted RT_SEARCH() as it's essentially the same as RT_FIND() and\nwill probably get removed.)\n\n---\nbool\nRT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p);\nor for variable-length value support,\nRT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p, size_t sz);\n\nIf an entry already exists, update its value to 'value_p' and return\ntrue. Otherwise set the value and return false.\n\nGiven variable-length value support, RT_GET() would have to do\nrepalloc() if the existing value size is not big enough for the new\nvalue, but it cannot as the radix tree doesn't know the size of each\nstored value. Another idea is that the radix tree returns the pointer\nto the slot and the caller updates the value accordingly. But it means\nthat the caller has to update the slot properly while considering the\nvalue size (embedded vs. 
single-leave value), which seems not a good\nidea.\n\nTo deal with this problem, I think we can somewhat change RT_GET() API\nas follow:\n\nRT_VALUE_TYPE\nRT_INSERT(RT_RADIX_TREE *tree, uint64 key, size_t sz, bool *found);\n\nIf the entry already exists, replace the value with a new empty value\nwith sz bytes and set \"found\" to true. Otherwise, insert an empty\nvalue, return its pointer, and set \"found\" to false.\n\nWe probably will find a better name but I use RT_INSERT() for\ndiscussion. RT_INSERT() returns an empty slot regardless of existing\nvalues. It can be used to insert a new value or to replace the value\nwith a larger value.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 27 Nov 2023 15:45:18 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Nov 27, 2023 at 1:45 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> Since the variable-length values support is a big deal and would be\n> related to API design I'd like to discuss the API design first.\n\nThanks for the fine summary of the issues here.\n\n[Swapping this back in my head]\n\n> RT_VALUE_TYPE\n> RT_GET(RT_RADIX_TREE *tree, uint64 key, bool *found);\n> or for variable-length value support,\n> RT_GET(RT_RADIX_TREE *tree, uint64 key, size_t sz, bool *found);\n>\n> If an entry already exists, return its pointer and set \"found\" to\n> true. Otherwize, insert an empty value with sz bytes, return its\n> pointer, and set \"found\" to false.\n\n> ---\n> bool\n> RT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p);\n> or for variable-length value support,\n> RT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p, size_t sz);\n>\n> If an entry already exists, update its value to 'value_p' and return\n> true. 
Otherwise set the value and return false.\n\nI'd have to double-check, but I think RT_SET is vestigial and I'm not\nsure it has any advantage over RT_GET as I've sketched it out. I'm\npretty sure it's only there now because changing the radix tree\nregression tests is much harder than changing TID store.\n\n> Given variable-length value support, RT_GET() would have to do\n> repalloc() if the existing value size is not big enough for the new\n> value, but it cannot as the radix tree doesn't know the size of each\n> stored value.\n\nI think we have two choices:\n\n- the value stores the \"length\". The caller would need to specify a\nfunction to compute size from the \"length\" member. Note this assumes\nthere is an array. I think both aspects are not great.\n- the value stores the \"size\". Callers that store an array (as\nPageTableEntry's do) would compute length when they need to. This\nsounds easier.\n\n> Another idea is that the radix tree returns the pointer\n> to the slot and the caller updates the value accordingly.\n\nI did exactly this in v43 TidStore if I understood you correctly. If I\nmisunderstood you, can you clarify?\n\n> But it means\n> that the caller has to update the slot properly while considering the\n> value size (embedded vs. single-leave value), which seems not a good\n> idea.\n\nFor this optimization, callers will have to know about pointer-sized\nvalues and treat them differently, but they don't need to know the\ndetails about how where they are stored.\n\nWhile we want to keep embedded values in the back of our minds, I\nreally think the details should be postponed to a follow-up commit.\n\n> To deal with this problem, I think we can somewhat change RT_GET() API\n> as follow:\n>\n> RT_VALUE_TYPE\n> RT_INSERT(RT_RADIX_TREE *tree, uint64 key, size_t sz, bool *found);\n>\n> If the entry already exists, replace the value with a new empty value\n> with sz bytes and set \"found\" to true. 
Otherwise, insert an empty\n> value, return its pointer, and set \"found\" to false.\n>\n> We probably will find a better name but I use RT_INSERT() for\n> discussion. RT_INSERT() returns an empty slot regardless of existing\n> values. It can be used to insert a new value or to replace the value\n> with a larger value.\n\nFor the case we are discussing, bitmaps, updating an existing value is\na bit tricky. We need the existing value to properly update it with\nset or unset bits. This can't work in general without a lot of work\nfor the caller.\n\nHowever, for vacuum, we have all values that we need up front. That\ngives me an idea: Something like this insert API could be optimized\nfor \"insert-only\": If we only free values when we free the whole tree\nat the end, that's a clear use case for David Rowley's proposed \"bump\ncontext\", which would save 8 bytes per allocation and be a bit faster.\n[1] (RT_GET for varlen values would use an aset context, to allow\nrepalloc, and nodes would continue to use slab).\n\n[1] https://www.postgresql.org/message-id/flat/CAApHDvqGSpCU95TmM=Bp=6xjL_nLys4zdZOpfNyWBk97Xrdj2w@mail.gmail.com\n\n\n", "msg_date": "Mon, 4 Dec 2023 15:21:16 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Dec 4, 2023 at 5:21 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Nov 27, 2023 at 1:45 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > Since the variable-length values support is a big deal and would be\n> > related to API design I'd like to discuss the API design first.\n>\n> Thanks for the fine summary of the issues here.\n>\n> [Swapping this back in my head]\n>\n> > RT_VALUE_TYPE\n> > RT_GET(RT_RADIX_TREE *tree, uint64 key, bool *found);\n> > or for variable-length value support,\n> > RT_GET(RT_RADIX_TREE *tree, uint64 key, size_t sz, bool *found);\n> >\n> > If an entry already exists, return 
its pointer and set \"found\" to\n> > true. Otherwize, insert an empty value with sz bytes, return its\n> > pointer, and set \"found\" to false.\n>\n> > ---\n> > bool\n> > RT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p);\n> > or for variable-length value support,\n> > RT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p, size_t sz);\n> >\n> > If an entry already exists, update its value to 'value_p' and return\n> > true. Otherwise set the value and return false.\n>\n> I'd have to double-check, but I think RT_SET is vestigial and I'm not\n> sure it has any advantage over RT_GET as I've sketched it out. I'm\n> pretty sure it's only there now because changing the radix tree\n> regression tests is much harder than changing TID store.\n\nAgreed.\n\n>\n> > Given variable-length value support, RT_GET() would have to do\n> > repalloc() if the existing value size is not big enough for the new\n> > value, but it cannot as the radix tree doesn't know the size of each\n> > stored value.\n>\n> I think we have two choices:\n>\n> - the value stores the \"length\". The caller would need to specify a\n> function to compute size from the \"length\" member. Note this assumes\n> there is an array. I think both aspects are not great.\n> - the value stores the \"size\". Callers that store an array (as\n> PageTableEntry's do) would compute length when they need to. This\n> sounds easier.\n\nAs for the second idea, do we always need to require the value to have\nthe \"size\" (e.g. int32) in the first field of its struct? If so, the\ncaller will be able to use only 4 bytes in embedded value cases (or\nwon't be able to use at all if the pointer size is 4 bytes).\n\n>\n> > Another idea is that the radix tree returns the pointer\n> > to the slot and the caller updates the value accordingly.\n>\n> I did exactly this in v43 TidStore if I understood you correctly. 
If I\n> misunderstood you, can you clarify?\n\nI meant to expose RT_GET_SLOT_RECURSIVE() so that the caller updates\nthe value as they want.\n\n>\n> > But it means\n> > that the caller has to update the slot properly while considering the\n> > value size (embedded vs. single-leave value), which seems not a good\n> > idea.\n>\n> For this optimization, callers will have to know about pointer-sized\n> values and treat them differently, but they don't need to know the\n> details about how where they are stored.\n>\n> While we want to keep embedded values in the back of our minds, I\n> really think the details should be postponed to a follow-up commit.\n\nAgreed.\n\n>\n> > To deal with this problem, I think we can somewhat change RT_GET() API\n> > as follow:\n> >\n> > RT_VALUE_TYPE\n> > RT_INSERT(RT_RADIX_TREE *tree, uint64 key, size_t sz, bool *found);\n> >\n> > If the entry already exists, replace the value with a new empty value\n> > with sz bytes and set \"found\" to true. Otherwise, insert an empty\n> > value, return its pointer, and set \"found\" to false.\n> >\n> > We probably will find a better name but I use RT_INSERT() for\n> > discussion. RT_INSERT() returns an empty slot regardless of existing\n> > values. It can be used to insert a new value or to replace the value\n> > with a larger value.\n>\n> For the case we are discussing, bitmaps, updating an existing value is\n> a bit tricky. We need the existing value to properly update it with\n> set or unset bits. This can't work in general without a lot of work\n> for the caller.\n\nTrue.\n\n>\n> However, for vacuum, we have all values that we need up front. 
That\n> gives me an idea: Something like this insert API could be optimized\n> for \"insert-only\": If we only free values when we free the whole tree\n> at the end, that's a clear use case for David Rowley's proposed \"bump\n> context\", which would save 8 bytes per allocation and be a bit faster.\n> [1] (RT_GET for varlen values would use an aset context, to allow\n> repalloc, and nodes would continue to use slab).\n\nInteresting idea and worth trying it. Do we need to protect the whole\ntree as insert-only for safety? It's problematic if the user uses\nmixed RT_INSERT() and RT_GET().\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 6 Dec 2023 06:33:33 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Dec 6, 2023 at 4:34 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Dec 4, 2023 at 5:21 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > > Given variable-length value support, RT_GET() would have to do\n> > > repalloc() if the existing value size is not big enough for the new\n> > > value, but it cannot as the radix tree doesn't know the size of each\n> > > stored value.\n> >\n> > I think we have two choices:\n> >\n> > - the value stores the \"length\". The caller would need to specify a\n> > function to compute size from the \"length\" member. Note this assumes\n> > there is an array. I think both aspects are not great.\n> > - the value stores the \"size\". Callers that store an array (as\n> > PageTableEntry's do) would compute length when they need to. This\n> > sounds easier.\n>\n> As for the second idea, do we always need to require the value to have\n> the \"size\" (e.g. int32) in the first field of its struct? 
If so, the\n> caller will be able to use only 4 bytes in embedded value cases (or\n> won't be able to use at all if the pointer size is 4 bytes).\n\nWe could have an RT_SIZE_TYPE for varlen value types. That's easy.\nThere is another way, though: (This is a digression into embedded\nvalues, but it does illuminate some issues even aside from that)\n\nMy thinking a while ago was that an embedded value had no explicit\nlength/size, but could be \"expanded\" into a conventional value for the\ncaller. For bitmaps, the smallest full value would have length 1 and\nwhatever size (For tid store maybe 16 bytes). This would happen\nautomatically via a template function.\n\nNow I think that could be too complicated (especially for page table\nentries, which have more bookkeeping than vacuum needs) and slow.\nImagine this as an embedded value:\n\ntypedef struct BlocktableEntry\n{\n uint16 size;\n\n /* later: uint8 flags; for bitmap scan */\n\n /* 64 bit: 3 elements , 32-bit: 1 element */\n OffsetNumber offsets[( sizeof(Pointer) - sizeof(int16) ) /\nsizeof(OffsetNumber)];\n\n /* end of embeddable value */\n\n bitmapword words[FLEXIBLE_ARRAY_MEMBER];\n} BlocktableEntry;\n\nHere we can use a slot to store up to 3 offsets, no matter how big\nthey are. That's great because a bitmap could be mostly wasted space.\nBut now the caller can't know up front how many bytes it needs until\nit retrieves the value and sees what's already there. If there are\nalready three values, the caller needs to tell the tree \"alloc this\nmuch, update this slot you just gave me with the alloc (maybe DSA)\npointer, and return the local pointer\". Then copy the 3 offsets into\nset bits, and set whatever else it needs to. With normal values, same\nthing, but with realloc.\n\nThis is a bit complex, but I see an advantage The tree doesn't need to\ncare so much about the size, so the value doesn't need to contain the\nsize. 
For our case, we can use length (number of bitmapwords) without\nthe disadvantages I mentioned above, with length zero (or maybe -1)\nmeaning \"no bitmapword array, the offsets are all in this small\narray\".\n\n> > > Another idea is that the radix tree returns the pointer\n> > > to the slot and the caller updates the value accordingly.\n> >\n> > I did exactly this in v43 TidStore if I understood you correctly. If I\n> > misunderstood you, can you clarify?\n>\n> I meant to expose RT_GET_SLOT_RECURSIVE() so that the caller updates\n> the value as they want.\n\nDid my sketch above get closer to that? Side note: I don't think we\ncan expose that directly (e.g. need to check for create or extend\nupwards), but some functionality can be a thin wrapper around it.\n\n> > However, for vacuum, we have all values that we need up front. That\n> > gives me an idea: Something like this insert API could be optimized\n> > for \"insert-only\": If we only free values when we free the whole tree\n> > at the end, that's a clear use case for David Rowley's proposed \"bump\n> > context\", which would save 8 bytes per allocation and be a bit faster.\n> > [1] (RT_GET for varlen values would use an aset context, to allow\n> > repalloc, and nodes would continue to use slab).\n>\n> Interesting idea and worth trying it. Do we need to protect the whole\n> tree as insert-only for safety? 
It's problematic if the user uses\n> mixed RT_INSERT() and RT_GET().\n\nYou're right, but I'm not sure what the policy should be.\n\n\n", "msg_date": "Wed, 6 Dec 2023 13:39:21 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Nov 27, 2023 at 1:45 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Sat, Oct 28, 2023 at 5:56 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> bool\n> RT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p);\n> or for variable-length value support,\n> RT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p, size_t sz);\n>\n> If an entry already exists, update its value to 'value_p' and return\n> true. Otherwise set the value and return false.\n\n> RT_VALUE_TYPE\n> RT_INSERT(RT_RADIX_TREE *tree, uint64 key, size_t sz, bool *found);\n>\n> If the entry already exists, replace the value with a new empty value\n> with sz bytes and set \"found\" to true. Otherwise, insert an empty\n> value, return its pointer, and set \"found\" to false.\n>\n> We probably will find a better name but I use RT_INSERT() for\n> discussion. RT_INSERT() returns an empty slot regardless of existing\n> values. It can be used to insert a new value or to replace the value\n> with a larger value.\n\nLooking at TidStoreSetBlockOffsets again (in particular how it works\nwith RT_GET), and thinking about issues we've discussed, I think\nRT_SET is sufficient for vacuum. Here's how it could work:\n\nTidStoreSetBlockOffsets could have a stack variable that's \"almost\nalways\" large enough. When not, it can allocate in its own context. It\nsets the necessary bits there. Then, it passes the pointer to RT_SET\nwith the number of bytes to copy. 
That seems very simple.\n\nAt some future time, we can add a new function with the complex\nbusiness about getting the current value to modify it, with the\nre-alloc'ing that it might require.\n\nIn other words, from both an API perspective and a performance\nperspective, it makes sense for tid store to have a simple \"set\"\ninterface for vacuum that can be optimized for its characteristics\n(insert only, ordered offsets). And also a more complex one for bitmap\nscan (setting/unsetting bits of existing values, in any order). They\ncan share the same iteration interface, key types, and value types.\n\nWhat do you think, Masahiko?\n\n\n", "msg_date": "Thu, 7 Dec 2023 10:27:00 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Dec 6, 2023 at 3:39 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Dec 6, 2023 at 4:34 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Dec 4, 2023 at 5:21 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > > Given variable-length value support, RT_GET() would have to do\n> > > > repalloc() if the existing value size is not big enough for the new\n> > > > value, but it cannot as the radix tree doesn't know the size of each\n> > > > stored value.\n> > >\n> > > I think we have two choices:\n> > >\n> > > - the value stores the \"length\". The caller would need to specify a\n> > > function to compute size from the \"length\" member. Note this assumes\n> > > there is an array. I think both aspects are not great.\n> > > - the value stores the \"size\". Callers that store an array (as\n> > > PageTableEntry's do) would compute length when they need to. This\n> > > sounds easier.\n> >\n> > As for the second idea, do we always need to require the value to have\n> > the \"size\" (e.g. int32) in the first field of its struct? 
If so, the\n> > caller will be able to use only 4 bytes in embedded value cases (or\n> > won't be able to use at all if the pointer size is 4 bytes).\n>\n> We could have an RT_SIZE_TYPE for varlen value types. That's easy.\n> There is another way, though: (This is a digression into embedded\n> values, but it does illuminate some issues even aside from that)\n>\n> My thinking a while ago was that an embedded value had no explicit\n> length/size, but could be \"expanded\" into a conventional value for the\n> caller. For bitmaps, the smallest full value would have length 1 and\n> whatever size (For tid store maybe 16 bytes). This would happen\n> automatically via a template function.\n>\n> Now I think that could be too complicated (especially for page table\n> entries, which have more bookkeeping than vacuum needs) and slow.\n> Imagine this as an embedded value:\n>\n> typedef struct BlocktableEntry\n> {\n> uint16 size;\n>\n> /* later: uint8 flags; for bitmap scan */\n>\n> /* 64 bit: 3 elements , 32-bit: 1 element */\n> OffsetNumber offsets[( sizeof(Pointer) - sizeof(int16) ) /\n> sizeof(OffsetNumber)];\n>\n> /* end of embeddable value */\n>\n> bitmapword words[FLEXIBLE_ARRAY_MEMBER];\n> } BlocktableEntry;\n>\n> Here we can use a slot to store up to 3 offsets, no matter how big\n> they are. That's great because a bitmap could be mostly wasted space.\n\nInteresting idea.\n\n> But now the caller can't know up front how many bytes it needs until\n> it retrieves the value and sees what's already there. If there are\n> already three values, the caller needs to tell the tree \"alloc this\n> much, update this slot you just gave me with the alloc (maybe DSA)\n> pointer, and return the local pointer\". Then copy the 3 offsets into\n> set bits, and set whatever else it needs to. 
With normal values, same\n> thing, but with realloc.\n>\n> This is a bit complex, but I see an advantage The tree doesn't need to\n> care so much about the size, so the value doesn't need to contain the\n> size. For our case, we can use length (number of bitmapwords) without\n> the disadvantages I mentioned above, with length zero (or maybe -1)\n> meaning \"no bitmapword array, the offsets are all in this small\n> array\".\n\nIt's still unclear to me why the value doesn't need to contain the size.\n\nIf I understand you correctly, in RT_GET(), the tree allocs a new\nmemory and updates the slot where the value is embedded with the\npointer to the allocated memory, and returns the pointer to the\ncaller. Since the returned value, newly allocated memory, is still\nempty, the callner needs to copy the contents of the old value to the\nnew value and do whatever else it needs to.\n\nIf the value is already a single-leave value and RT_GET() is called\nwith a larger size, the slot is always replaced with the newly\nallocated area and the caller needs to copy the contents? If the tree\ndoes realloc the value with a new size, how does the tree know the new\nvalue is larger than the existing value? 
It seems like the caller\nneeds to provide a function to calculate the size of the value based\non the length.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 8 Dec 2023 10:56:50 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 7, 2023 at 12:27 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Nov 27, 2023 at 1:45 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Sat, Oct 28, 2023 at 5:56 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > bool\n> > RT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p);\n> > or for variable-length value support,\n> > RT_SET(RT_RADIX_TREE *tree, uint64 key, RT_VALUE_TYPE *value_p, size_t sz);\n> >\n> > If an entry already exists, update its value to 'value_p' and return\n> > true. Otherwise set the value and return false.\n>\n> > RT_VALUE_TYPE\n> > RT_INSERT(RT_RADIX_TREE *tree, uint64 key, size_t sz, bool *found);\n> >\n> > If the entry already exists, replace the value with a new empty value\n> > with sz bytes and set \"found\" to true. Otherwise, insert an empty\n> > value, return its pointer, and set \"found\" to false.\n> >\n> > We probably will find a better name but I use RT_INSERT() for\n> > discussion. RT_INSERT() returns an empty slot regardless of existing\n> > values. It can be used to insert a new value or to replace the value\n> > with a larger value.\n>\n> Looking at TidStoreSetBlockOffsets again (in particular how it works\n> with RT_GET), and thinking about issues we've discussed, I think\n> RT_SET is sufficient for vacuum. Here's how it could work:\n>\n> TidStoreSetBlockOffsets could have a stack variable that's \"almost\n> always\" large enough. When not, it can allocate in its own context. It\n> sets the necessary bits there. 
Then, it passes the pointer to RT_SET\n> with the number of bytes to copy. That seems very simple.\n\nRight.\n\n>\n> At some future time, we can add a new function with the complex\n> business about getting the current value to modify it, with the\n> re-alloc'ing that it might require.\n>\n> In other words, from both an API perspective and a performance\n> perspective, it makes sense for tid store to have a simple \"set\"\n> interface for vacuum that can be optimized for its characteristics\n> (insert only, ordered offsets). And also a more complex one for bitmap\n> scan (setting/unsetting bits of existing values, in any order). They\n> can share the same iteration interface, key types, and value types.\n>\n> What do you think, Masahiko?\n\nGood point. RT_SET() would be faster than RT_GET() and updating the\nvalue because RT_SET() would not need to take care of the existing\nvalue (its size, embedded or not, realloc etc).\n\nI think that we can separate the radix tree patch into two parts: the\nmain implementation with RT_SET(), and more complex APIs such as\nRT_GET() etc. That way, it would probably make it easy to complete the\nradix tree and tidstore first.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 8 Dec 2023 11:24:38 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 8, 2023 at 8:57 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> It's still unclear to me why the value doesn't need to contain the size.\n>\n> If I understand you correctly, in RT_GET(), the tree allocs a new\n> memory and updates the slot where the value is embedded with the\n> pointer to the allocated memory, and returns the pointer to the\n> caller. 
Since the returned value, newly allocated memory, is still\n> empty, the callner needs to copy the contents of the old value to the\n> new value and do whatever else it needs to.\n>\n> If the value is already a single-leave value and RT_GET() is called\n> with a larger size, the slot is always replaced with the newly\n> allocated area and the caller needs to copy the contents? If the tree\n> does realloc the value with a new size, how does the tree know the new\n> value is larger than the existing value? It seems like the caller\n> needs to provide a function to calculate the size of the value based\n> on the length.\n\nRight. My brief description mentioned one thing without details: The\ncaller would need to control whether to re-alloc. RT_GET would pass\nthe size. If nothing is found, the tree would allocate. If there is a\nvalue already, just return it. That means both the address of the\nslot, and the local pointer to the value (with embedded, would be the\nsame address). The caller checks if the array is long enough. If not,\ncall a new function that takes the new size, the address of the slot,\nand the pointer to the old value. The tree would re-alloc, put the\nalloc pointer in the slot and return the new local pointer. But as we\nagreed, that is all follow-up work.\n\n\n", "msg_date": "Fri, 8 Dec 2023 11:37:17 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 8, 2023 at 1:37 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Dec 8, 2023 at 8:57 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > It's still unclear to me why the value doesn't need to contain the size.\n> >\n> > If I understand you correctly, in RT_GET(), the tree allocs a new\n> > memory and updates the slot where the value is embedded with the\n> > pointer to the allocated memory, and returns the pointer to the\n> > caller. 
Since the returned value, newly allocated memory, is still\n> > empty, the callner needs to copy the contents of the old value to the\n> > new value and do whatever else it needs to.\n> >\n> > If the value is already a single-leave value and RT_GET() is called\n> > with a larger size, the slot is always replaced with the newly\n> > allocated area and the caller needs to copy the contents? If the tree\n> > does realloc the value with a new size, how does the tree know the new\n> > value is larger than the existing value? It seems like the caller\n> > needs to provide a function to calculate the size of the value based\n> > on the length.\n>\n> Right. My brief description mentioned one thing without details: The\n> caller would need to control whether to re-alloc. RT_GET would pass\n> the size. If nothing is found, the tree would allocate. If there is a\n> value already, just return it. That means both the address of the\n> slot, and the local pointer to the value (with embedded, would be the\n> same address). The caller checks if the array is long enough. If not,\n> call a new function that takes the new size, the address of the slot,\n> and the pointer to the old value. The tree would re-alloc, put the\n> alloc pointer in the slot and return the new local pointer. But as we\n> agreed, that is all follow-up work.\n\nThank you for the detailed explanation. That makes sense to me. 
We\nwill address it as a follow-up work.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 8 Dec 2023 15:45:12 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 8, 2023 at 3:45 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Dec 8, 2023 at 1:37 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Fri, Dec 8, 2023 at 8:57 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > It's still unclear to me why the value doesn't need to contain the size.\n> > >\n> > > If I understand you correctly, in RT_GET(), the tree allocs a new\n> > > memory and updates the slot where the value is embedded with the\n> > > pointer to the allocated memory, and returns the pointer to the\n> > > caller. Since the returned value, newly allocated memory, is still\n> > > empty, the callner needs to copy the contents of the old value to the\n> > > new value and do whatever else it needs to.\n> > >\n> > > If the value is already a single-leave value and RT_GET() is called\n> > > with a larger size, the slot is always replaced with the newly\n> > > allocated area and the caller needs to copy the contents? If the tree\n> > > does realloc the value with a new size, how does the tree know the new\n> > > value is larger than the existing value? It seems like the caller\n> > > needs to provide a function to calculate the size of the value based\n> > > on the length.\n> >\n> > Right. My brief description mentioned one thing without details: The\n> > caller would need to control whether to re-alloc. RT_GET would pass\n> > the size. If nothing is found, the tree would allocate. If there is a\n> > value already, just return it. 
That means both the address of the\n> > slot, and the local pointer to the value (with embedded, would be the\n> > same address).\n\nBTW Given that the actual value size can be calculated only by the\ncaller, how does the tree know if the value is embedded or not? It's\nprobably related to how to store combined pointer/value slots. If leaf\nnodes have a bitmap array that indicates the corresponding slot is an\nembedded value or a pointer to a value, it would be easy. But since\nthe bitmap array is needed only in the leaf nodes, internal nodes and\nleaf nodes will no longer be identical structure, which is not a bad\nthing to me, though.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 8 Dec 2023 17:05:53 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 8, 2023 at 3:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> BTW Given that the actual value size can be calculated only by the\n> caller, how does the tree know if the value is embedded or not? It's\n> probably related to how to store combined pointer/value slots.\n\nRight, this is future work. At first, variable-length types will have\nto be single-value leaves. In fact, the idea for storing up to 3\noffsets in the bitmap header could be done this way -- it would just\nbe a (small) single-value leaf.\n\n(Reminder: Currently, fixed-length values are compile-time embeddable\nif the platform pointer size is big enough.)\n\n> If leaf\n> nodes have a bitmap array that indicates the corresponding slot is an\n> embedded value or a pointer to a value, it would be easy.\n\nThat's the most general way to do it. We could do it much more easily\nwith a pointer tag, although for the above idea it may require some\nendian-aware coding. 
Both were mentioned in the paper, I recall.\n\n> But since\n> the bitmap array is needed only in the leaf nodes, internal nodes and\n> leaf nodes will no longer be identical structure, which is not a bad\n> thing to me, though.\n\nAbsolutely no way we are going back to double everything: double\ntypes, double functions, double memory contexts. Plus, that bitmap in\ninner nodes could indicate a pointer to a leaf that got there by \"lazy\nexpansion\".\n\n\n", "msg_date": "Fri, 8 Dec 2023 17:46:29 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 8, 2023 at 7:46 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Dec 8, 2023 at 3:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > BTW Given that the actual value size can be calculated only by the\n> > caller, how does the tree know if the value is embedded or not? It's\n> > probably related to how to store combined pointer/value slots.\n>\n> Right, this is future work. At first, variable-length types will have\n> to be single-value leaves. In fact, the idea for storing up to 3\n> offsets in the bitmap header could be done this way -- it would just\n> be a (small) single-value leaf.\n\nAgreed.\n\n>\n> (Reminder: Currently, fixed-length values are compile-time embeddable\n> if the platform pointer size is big enough.)\n>\n> > If leaf\n> > nodes have a bitmap array that indicates the corresponding slot is an\n> > embedded value or a pointer to a value, it would be easy.\n>\n> That's the most general way to do it. We could do it much more easily\n> with a pointer tag, although for the above idea it may require some\n> endian-aware coding. Both were mentioned in the paper, I recall.\n\nTrue. Probably we can use the combined pointer/value slots approach\nonly if the tree is able to use the pointer tagging. 
That is, if the\ncaller allows the tree to use one bit of the value.\n\nI'm going to update the patch based on the recent discussion (RT_SET()\nand variable-length values) etc., and post the patch set early next\nweek.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 8 Dec 2023 21:44:12 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 8, 2023 at 9:44 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Dec 8, 2023 at 7:46 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Fri, Dec 8, 2023 at 3:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > BTW Given that the actual value size can be calculated only by the\n> > > caller, how does the tree know if the value is embedded or not? It's\n> > > probably related to how to store combined pointer/value slots.\n> >\n> > Right, this is future work. At first, variable-length types will have\n> > to be single-value leaves. In fact, the idea for storing up to 3\n> > offsets in the bitmap header could be done this way -- it would just\n> > be a (small) single-value leaf.\n>\n> Agreed.\n>\n> >\n> > (Reminder: Currently, fixed-length values are compile-time embeddable\n> > if the platform pointer size is big enough.)\n> >\n> > > If leaf\n> > > nodes have a bitmap array that indicates the corresponding slot is an\n> > > embedded value or a pointer to a value, it would be easy.\n> >\n> > That's the most general way to do it. We could do it much more easily\n> > with a pointer tag, although for the above idea it may require some\n> > endian-aware coding. Both were mentioned in the paper, I recall.\n>\n> True. Probably we can use the combined pointer/value slots approach\n> only if the tree is able to use the pointer tagging. 
That is, if the\n> caller allows the tree to use one bit of the value.\n>\n> I'm going to update the patch based on the recent discussion (RT_SET()\n> and variable-length values) etc., and post the patch set early next\n> week.\n\nI've attached the updated patch set. From the previous patch set, I've\nmerged patches 0007 to 0010. The other changes such as adding RT_GET()\nstill are unmerged for now, for discussion. Probably we can make them\nas follow-up patches as we discussed. 0011 to 0015 patches are new\nchanges for v44 patch set, which removes RT_SEARCH() and RT_SET() and\nsupport variable-length values.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 11 Dec 2023 15:17:37 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Dec 11, 2023 at 1:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> I've attached the updated patch set. From the previous patch set, I've\n> merged patches 0007 to 0010. The other changes such as adding RT_GET()\n> still are unmerged for now, for discussion. Probably we can make them\n> as follow-up patches as we discussed. 0011 to 0015 patches are new\n> changes for v44 patch set, which removes RT_SEARCH() and RT_SET() and\n> support variable-length values.\n\nThis looks like the right direction, and I'm pleased it's not much\nadditional code on top of my last patch.\n\nv44-0014:\n\n+#ifdef RT_VARLEN_VALUE\n+ /* XXX: need to choose block sizes? 
*/\n+ tree->leaf_ctx = AllocSetContextCreate(ctx,\n+ \"radix tree leaves\",\n+ ALLOCSET_DEFAULT_SIZES);\n+#else\n+ tree->leaf_ctx = SlabContextCreate(ctx,\n+ \"radix tree leaves\",\n+ RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n+ sizeof(RT_VALUE_TYPE));\n+#endif /* RT_VARLEN_VALUE */\n\nChoosing block size: Similar to what we've discussed previously around\nDSA segments, we might model this on CreateWorkExprContext() in\nsrc/backend/executor/execUtils.c. Maybe tid store can pass maint_w_m /\nautovac_w_m (later work_mem for bitmap scan). RT_CREATE could set the\nmax block size to 1/16 of that, or less.\n\nAlso, it occurred to me that compile-time embeddable values don't need\na leaf context. I'm not sure how many places assume that there is\nalways a leaf context. If not many, it may be worth not creating one\nhere, just to be tidy.\n\n+ size_t copysize;\n\n- memcpy(leaf.local, value_p, sizeof(RT_VALUE_TYPE));\n+ copysize = sizeof(RT_VALUE_TYPE);\n+#endif\n+\n+ memcpy(leaf.local, value_p, copysize);\n\nI'm not sure this indirection adds clarity. I guess the intent was to\nkeep from saying \"memcpy\" twice, but now the code has to say \"copysize\n= foo\" twice.\n\nFor varlen case, we need to watch out for slowness because of memcpy.\nLet's put that off for later testing, though. We may someday want to\navoid a memcpy call for the varlen case, so let's keep it flexible\nhere.\n\nv44-0015:\n\n+#define SizeOfBlocktableEntry (offsetof(\n\nUnused.\n\n+ char buf[MaxBlocktableEntrySize] = {0};\n\nZeroing this buffer is probably going to be expensive. 
Also see this\npre-existing comment:\n/* WIP: slow, since it writes to memory for every bit */\npage->words[wordnum] |= ((bitmapword) 1 << bitnum);\n\nFor this function (which will be vacuum-only, so we can assume\nordering), in the loop we can:\n* declare the local bitmapword variable to be zero\n* set the bits on it\n* write it out to the right location when done.\n\nLet's fix both of these at once.\n\n+ if (TidStoreIsShared(ts))\n+ shared_rt_set(ts->tree.shared, blkno, (void *) page, page_len);\n+ else\n+ local_rt_set(ts->tree.local, blkno, (void *) page, page_len);\n\nIs there a reason for \"void *\"? The declared parameter is\n\"RT_VALUE_TYPE *value_p\" in 0014.\nAlso, since this function is for vacuum (and other uses will need a\nnew function), let's assert the returned bool is false.\n\nDoes iteration still work? If so, it's not too early to re-wire this\nup with vacuum and see how it behaves.\n\nLastly, my compiler has a warning that CI doesn't have:\n\nIn file included from ../src/test/modules/test_radixtree/test_radixtree.c:121:\n../src/include/lib/radixtree.h: In function ‘rt_find.isra’:\n../src/include/lib/radixtree.h:2142:24: warning: ‘slot’ may be used\nuninitialized [-Wmaybe-uninitialized]\n 2142 | return (RT_VALUE_TYPE*) slot;\n | ^~~~~~~~~~~~~~~~~~~~~\n../src/include/lib/radixtree.h:2112:23: note: ‘slot’ was declared here\n 2112 | RT_PTR_ALLOC *slot;\n | ^~~~\n\n\n", "msg_date": "Tue, 12 Dec 2023 09:53:08 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Dec 12, 2023 at 11:53 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Dec 11, 2023 at 1:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I've attached the updated patch set. From the previous patch set, I've\n> > merged patches 0007 to 0010. The other changes such as adding RT_GET()\n> > still are unmerged for now, for discussion. 
Probably we can make them\n> > as follow-up patches as we discussed. 0011 to 0015 patches are new\n> > changes for v44 patch set, which removes RT_SEARCH() and RT_SET() and\n> > support variable-length values.\n>\n> This looks like the right direction, and I'm pleased it's not much\n> additional code on top of my last patch.\n>\n> v44-0014:\n>\n> +#ifdef RT_VARLEN_VALUE\n> + /* XXX: need to choose block sizes? */\n> + tree->leaf_ctx = AllocSetContextCreate(ctx,\n> + \"radix tree leaves\",\n> + ALLOCSET_DEFAULT_SIZES);\n> +#else\n> + tree->leaf_ctx = SlabContextCreate(ctx,\n> + \"radix tree leaves\",\n> + RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n> + sizeof(RT_VALUE_TYPE));\n> +#endif /* RT_VARLEN_VALUE */\n>\n> Choosing block size: Similar to what we've discussed previously around\n> DSA segments, we might model this on CreateWorkExprContext() in\n> src/backend/executor/execUtils.c. Maybe tid store can pass maint_w_m /\n> autovac_w_m (later work_mem for bitmap scan). RT_CREATE could set the\n> max block size to 1/16 of that, or less.\n>\n> Also, it occurred to me that compile-time embeddable values don't need\n> a leaf context. I'm not sure how many places assume that there is\n> always a leaf context. If not many, it may be worth not creating one\n> here, just to be tidy.\n>\n> + size_t copysize;\n>\n> - memcpy(leaf.local, value_p, sizeof(RT_VALUE_TYPE));\n> + copysize = sizeof(RT_VALUE_TYPE);\n> +#endif\n> +\n> + memcpy(leaf.local, value_p, copysize);\n>\n> I'm not sure this indirection adds clarity. I guess the intent was to\n> keep from saying \"memcpy\" twice, but now the code has to say \"copysize\n> = foo\" twice.\n>\n> For varlen case, we need to watch out for slowness because of memcpy.\n> Let's put that off for later testing, though. 
We may someday want to\n> avoid a memcpy call for the varlen case, so let's keep it flexible\n> here.\n>\n> v44-0015:\n>\n> +#define SizeOfBlocktableEntry (offsetof(\n>\n> Unused.\n>\n> + char buf[MaxBlocktableEntrySize] = {0};\n>\n> Zeroing this buffer is probably going to be expensive. Also see this\n> pre-existing comment:\n> /* WIP: slow, since it writes to memory for every bit */\n> page->words[wordnum] |= ((bitmapword) 1 << bitnum);\n>\n> For this function (which will be vacuum-only, so we can assume\n> ordering), in the loop we can:\n> * declare the local bitmapword variable to be zero\n> * set the bits on it\n> * write it out to the right location when done.\n>\n> Let's fix both of these at once.\n>\n> + if (TidStoreIsShared(ts))\n> + shared_rt_set(ts->tree.shared, blkno, (void *) page, page_len);\n> + else\n> + local_rt_set(ts->tree.local, blkno, (void *) page, page_len);\n>\n> Is there a reason for \"void *\"? The declared parameter is\n> \"RT_VALUE_TYPE *value_p\" in 0014.\n> Also, since this function is for vacuum (and other uses will need a\n> new function), let's assert the returned bool is false.\n>\n> Does iteration still work? If so, it's not too early to re-wire this\n> up with vacuum and see how it behaves.\n>\n> Lastly, my compiler has a warning that CI doesn't have:\n>\n> In file included from ../src/test/modules/test_radixtree/test_radixtree.c:121:\n> ../src/include/lib/radixtree.h: In function ‘rt_find.isra’:\n> ../src/include/lib/radixtree.h:2142:24: warning: ‘slot’ may be used\n> uninitialized [-Wmaybe-uninitialized]\n> 2142 | return (RT_VALUE_TYPE*) slot;\n> | ^~~~~~~~~~~~~~~~~~~~~\n> ../src/include/lib/radixtree.h:2112:23: note: ‘slot’ was declared here\n> 2112 | RT_PTR_ALLOC *slot;\n> | ^~~~\n\nThank you for the comments! I agreed with all of them and incorporated\nthem into the attached latest patch set, v45.\n\nIn v45, 0001 - 0006 are from earlier versions but I've merged previous\nupdates. 
So the radix tree now has RT_SET() and RT_FIND() but not\nRT_GET() and RT_SEARCH(). 0007 and 0008 are the updates from previous\nversions that incorporated the above comments. 0009 patch integrates\ntidstore with lazy vacuum. Note that DSA segment problem is not\nresolved yet in this patch. 0010 and 0011 makes DSA initial/max\nsegment size configurable and make parallel vacuum specify both in\nproportion to maintenance_work_mem. 0012 is a development-purpose\npatch to make it easy to investigate bugs in tidstore. I'd like to\nkeep it in the patch set at least during the development.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 14 Dec 2023 09:22:11 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 14, 2023 at 7:22 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> In v45, 0001 - 0006 are from earlier versions but I've merged previous\n> updates. So the radix tree now has RT_SET() and RT_FIND() but not\n> RT_GET() and RT_SEARCH(). 0007 and 0008 are the updates from previous\n> versions that incorporated the above comments. 0009 patch integrates\n> tidstore with lazy vacuum.\n\nExcellent! 
I repeated a quick run of the small \"test 1\" with very low m_w_m from\n\nhttps://www.postgresql.org/message-id/CAFBsxsHrvTPUK%3DC1%3DxweJjGujja4Xjfgva3C8jnW3Shz6RBnFg%40mail.gmail.com\n\n...and got similar results, so we still have good space-efficiency on this test:\n\nmaster:\nINFO: finished vacuuming \"john.public.test\": index scans: 9\nsystem usage: CPU: user: 56.83 s, system: 9.36 s, elapsed: 119.62 s\n\nv45:\nINFO: finished vacuuming \"john.public.test\": index scans: 1\nsystem usage: CPU: user: 6.82 s, system: 2.05 s, elapsed: 10.89 s\n\nMore sparse TID distributions won't be as favorable, but we have ideas\nto improve that in the future.\n\nFor my next steps, I will finish the node-shrinking behavior and save\nfor a later patchset. Not needed for tid store, but needs to happen\nbecause of assumptions in the code. Also, some time ago, I think I\ncommented out RT_FREE_RECURSE to get something working, so I'll fix\nit, and look at other fixmes and todos.\n\n> Note that DSA segment problem is not\n> resolved yet in this patch.\n\nI remember you started a separate thread about this, but I don't think\nit got any attention. Maybe reply with a \"TLDR;\" and share a patch to\nallow controlling max segment size.\n\nSome more comments:\n\nv45-0003:\n\nSince RT_ITERATE_NEXT_PTR works for tid store, do we even need\nRT_ITERATE_NEXT anymore? The former should handle fixed-length values\njust fine? If so, we should rename it to match the latter.\n\n+ * The caller is responsible for locking/unlocking the tree in shared mode.\n\nThis is not new to v45, but this will come up again below. This needs\nmore explanation: Since we're returning a pointer (to support\nvariable-length values), the caller needs to maintain control until\nit's finished with the value.\n\nv45-0005:\n\n+ * Regarding the concurrency support, we use a single LWLock for the TidStore.\n+ * The TidStore is exclusively locked when inserting encoded tids to the\n+ * radix tree or when resetting itself. 
When searching on the TidStore or\n+ * doing the iteration, it is not locked but the underlying radix tree is\n+ * locked in shared mode.\n\nThis is just stating facts without giving any reasons. Readers are\ngoing to wonder why it's inconsistent. The \"why\" is much more\nimportant than the \"what\". Even with that, this comment is also far\nfrom the relevant parts, and so will get out of date. Maybe we can\njust make sure each relevant function is explained individually.\n\nv45-0007:\n\n-RT_SCOPE RT_RADIX_TREE * RT_CREATE(MemoryContext ctx);\n+RT_SCOPE RT_RADIX_TREE * RT_CREATE(MemoryContext ctx, Size work_mem);\n\nTid store calls this max_bytes -- can we use that name here, too?\n\"work_mem\" is highly specific.\n\n- RT_PTR_ALLOC *slot;\n+ RT_PTR_ALLOC *slot = NULL;\n\nWe have a macro for invalid pointer because of DSA.\n\nv45-0008:\n\n- if (off < 1 || off > MAX_TUPLES_PER_PAGE)\n+ if (unlikely(off < 1 || off > MAX_TUPLES_PER_PAGE))\n elog(ERROR, \"tuple offset out of range: %u\", off);\n\nThis is a superfluous distraction, since the error path is located way\noff in the cold segment of the binary.\n\nv45-0009:\n\n(just a few small things for now)\n\n- * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in the\n- * vacrel->dead_items array.\n+ * lazy_vacuum_heap_page() -- free page's LP_DEAD items.\n\nI think we can keep as \"listed in the TID store\".\n\n- * Allocate dead_items (either using palloc, or in dynamic shared memory).\n- * Sets dead_items in vacrel for caller.\n+ * Allocate a (local or shared) TidStore for storing dead TIDs. Sets dead_items\n+ * in vacrel for caller.\n\nI think we want to keep \"in dynamic shared memory\". 
It's still true.\nI'm not sure anything needs to change here, actually.\n\n parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,\n- int nrequested_workers, int max_items,\n- int elevel, BufferAccessStrategy bstrategy)\n+ int nrequested_workers, int vac_work_mem,\n+ int max_offset, int elevel,\n+ BufferAccessStrategy bstrategy)\n\nIt seems very strange to me that this function has to pass the\nmax_offset. In general, it's been simpler to assume we have a constant\nmax_offset, but in this case that fact is not helping. Something to\nthink about for later.\n\n- (errmsg(\"scanned index \\\"%s\\\" to remove %d row versions\",\n+ (errmsg(\"scanned index \\\"%s\\\" to remove \" UINT64_FORMAT \" row versions\",\n\nThis should be signed int64.\n\nv45-0010:\n\nThinking about this some more, I'm not sure we need to do anything\ndifferent for the *starting* segment size. (Controlling *max* size\ndoes seem important, however.) For the corner case of m_w_m = 1MB,\nit's fine if vacuum quits pruning immediately after (in effect) it\nfinds the DSA has gone to 2MB. It's not worth bothering with, IMO. If\nthe memory accounting starts >1MB because we're adding the trivial\nsize of some struct, let's just stop doing that. The segment\nallocations are what we care about.\n\nv45-0011:\n\n+ /*\n+ * max_bytes is forced to be at least 64kB, the current minimum valid\n+ * value for the work_mem GUC.\n+ */\n+ max_bytes = Max(64 * 1024L, max_bytes);\n\nWhy? 
I believe I mentioned months ago that copying a hard-coded value\nthat can get out of sync is not maintainable, but I don't even see the\npoint of this part.\n\n\n", "msg_date": "Fri, 15 Dec 2023 08:30:18 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 15, 2023 at 10:30 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Dec 14, 2023 at 7:22 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > In v45, 0001 - 0006 are from earlier versions but I've merged previous\n> > updates. So the radix tree now has RT_SET() and RT_FIND() but not\n> > RT_GET() and RT_SEARCH(). 0007 and 0008 are the updates from previous\n> > versions that incorporated the above comments. 0009 patch integrates\n> > tidstore with lazy vacuum.\n>\n> Excellent! I repeated a quick run of the small \"test 1\" with very low m_w_m from\n>\n> https://www.postgresql.org/message-id/CAFBsxsHrvTPUK%3DC1%3DxweJjGujja4Xjfgva3C8jnW3Shz6RBnFg%40mail.gmail.com\n>\n> ...and got similar results, so we still have good space-efficiency on this test:\n>\n> master:\n> INFO: finished vacuuming \"john.public.test\": index scans: 9\n> system usage: CPU: user: 56.83 s, system: 9.36 s, elapsed: 119.62 s\n>\n> v45:\n> INFO: finished vacuuming \"john.public.test\": index scans: 1\n> system usage: CPU: user: 6.82 s, system: 2.05 s, elapsed: 10.89 s\n\nThank you for testing it again. That's a very good result.\n\n> For my next steps, I will finish the node-shrinking behavior and save\n> for a later patchset. Not needed for tid store, but needs to happen\n> because of assumptions in the code. 
Also, some time ago, I think I\n> commented out RT_FREE_RECURSE to get something working, so I'll fix\n> it, and look at other fixmes and todos.\n\nGreat!\n\n>\n> > Note that DSA segment problem is not\n> > resolved yet in this patch.\n>\n> I remember you started a separate thread about this, but I don't think\n> it got any attention. Maybe reply with a \"TLDR;\" and share a patch to\n> allow controlling max segment size.\n\nYeah, I recalled that thread. Will send a reply.\n\n>\n> Some more comments:\n>\n> v45-0003:\n>\n> Since RT_ITERATE_NEXT_PTR works for tid store, do we even need\n> RT_ITERATE_NEXT anymore? The former should handle fixed-length values\n> just fine? If so, we should rename it to match the latter.\n\nAgreed to rename it.\n\n>\n> + * The caller is responsible for locking/unlocking the tree in shared mode.\n>\n> This is not new to v45, but this will come up again below. This needs\n> more explanation: Since we're returning a pointer (to support\n> variable-length values), the caller needs to maintain control until\n> it's finished with the value.\n\nWill fix.\n\n>\n> v45-0005:\n>\n> + * Regarding the concurrency support, we use a single LWLock for the TidStore.\n> + * The TidStore is exclusively locked when inserting encoded tids to the\n> + * radix tree or when resetting itself. When searching on the TidStore or\n> + * doing the iteration, it is not locked but the underlying radix tree is\n> + * locked in shared mode.\n>\n> This is just stating facts without giving any reasons. Readers are\n> going to wonder why it's inconsistent. The \"why\" is much more\n> important than the \"what\". Even with that, this comment is also far\n> from the relevant parts, and so will get out of date. 
Maybe we can\n> just make sure each relevant function is explained individually.\n\nRight, I'll fix it.\n\n>\n> v45-0007:\n>\n> -RT_SCOPE RT_RADIX_TREE * RT_CREATE(MemoryContext ctx);\n> +RT_SCOPE RT_RADIX_TREE * RT_CREATE(MemoryContext ctx, Size work_mem);\n>\n> Tid store calls this max_bytes -- can we use that name here, too?\n> \"work_mem\" is highly specific.\n\nWhile I agree that \"work_mem\" is highly specific, I avoided using\n\"max_bytes\" in radix tree because \"max_bytes\" sounds to me there is a\nmemory limitation but the radix tree doesn't have it actually. It\nmight be sufficient to mention it in the comment, though.\n\n>\n> - RT_PTR_ALLOC *slot;\n> + RT_PTR_ALLOC *slot = NULL;\n>\n> We have a macro for invalid pointer because of DSA.\n\nWill fix.\n\n>\n> v45-0008:\n>\n> - if (off < 1 || off > MAX_TUPLES_PER_PAGE)\n> + if (unlikely(off < 1 || off > MAX_TUPLES_PER_PAGE))\n> elog(ERROR, \"tuple offset out of range: %u\", off);\n>\n> This is a superfluous distraction, since the error path is located way\n> off in the cold segment of the binary.\n\nOkay, will remove it.\n\n>\n> v45-0009:\n>\n> (just a few small things for now)\n>\n> - * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in the\n> - * vacrel->dead_items array.\n> + * lazy_vacuum_heap_page() -- free page's LP_DEAD items.\n>\n> I think we can keep as \"listed in the TID store\".\n>\n> - * Allocate dead_items (either using palloc, or in dynamic shared memory).\n> - * Sets dead_items in vacrel for caller.\n> + * Allocate a (local or shared) TidStore for storing dead TIDs. Sets dead_items\n> + * in vacrel for caller.\n>\n> I think we want to keep \"in dynamic shared memory\". It's still true.\n> I'm not sure anything needs to change here, actually.\n\nAgreed with above comments. 
Will fix them.\n\n>\n> parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,\n> - int nrequested_workers, int max_items,\n> - int elevel, BufferAccessStrategy bstrategy)\n> + int nrequested_workers, int vac_work_mem,\n> + int max_offset, int elevel,\n> + BufferAccessStrategy bstrategy)\n>\n> It seems very strange to me that this function has to pass the\n> max_offset. In general, it's been simpler to assume we have a constant\n> max_offset, but in this case that fact is not helping. Something to\n> think about for later.\n\nmax_offset was previously used in old TID encoding in tidstore. Since\ntidstore has entries for each block, I think we no longer need it.\n\n>\n> - (errmsg(\"scanned index \\\"%s\\\" to remove %d row versions\",\n> + (errmsg(\"scanned index \\\"%s\\\" to remove \" UINT64_FORMAT \" row versions\",\n>\n> This should be signed int64.\n\nWill fix.\n\n>\n> v45-0010:\n>\n> Thinking about this some more, I'm not sure we need to do anything\n> different for the *starting* segment size. (Controlling *max* size\n> does seem important, however.) For the corner case of m_w_m = 1MB,\n> it's fine if vacuum quits pruning immediately after (in effect) it\n> finds the DSA has gone to 2MB. It's not worth bothering with, IMO. If\n> the memory accounting starts >1MB because we're adding the trivial\n> size of some struct, let's just stop doing that. The segment\n> allocations are what we care about.\n\nIIUC it's for work_mem, whose the minimum value is 64kB.\n\n>\n> v45-0011:\n>\n> + /*\n> + * max_bytes is forced to be at least 64kB, the current minimum valid\n> + * value for the work_mem GUC.\n> + */\n> + max_bytes = Max(64 * 1024L, max_bytes);\n>\n> Why?\n\nThis is to avoid creating a radix tree within very small memory. The\nminimum work_mem value is a reasonable lower bound that PostgreSQL\nuses internally. 
It's actually copied from tuplesort.c.\n\n>I believe I mentioned months ago that copying a hard-coded value\n> that can get out of sync is not maintainable, but I don't even see the\n> point of this part.\n\nTrue.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 15 Dec 2023 17:15:09 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Dec 15, 2023 at 3:15 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Dec 15, 2023 at 10:30 AM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,\n> > - int nrequested_workers, int max_items,\n> > - int elevel, BufferAccessStrategy bstrategy)\n> > + int nrequested_workers, int vac_work_mem,\n> > + int max_offset, int elevel,\n> > + BufferAccessStrategy bstrategy)\n> >\n> > It seems very strange to me that this function has to pass the\n> > max_offset. In general, it's been simpler to assume we have a constant\n> > max_offset, but in this case that fact is not helping. Something to\n> > think about for later.\n>\n> max_offset was previously used in old TID encoding in tidstore. Since\n> tidstore has entries for each block, I think we no longer need it.\n\nIt's needed now to properly size the allocation of TidStoreIter which\ncontains...\n\n+/* Result struct for TidStoreIterateNext */\n+typedef struct TidStoreIterResult\n+{\n+ BlockNumber blkno;\n+ int num_offsets;\n+ OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER];\n+} TidStoreIterResult;\n\nMaybe we can palloc the offset array to \"almost always\" big enough,\nwith logic to resize if needed? If not too hard, seems worth it to\navoid churn in the parameter list.\n\n> > v45-0010:\n> >\n> > Thinking about this some more, I'm not sure we need to do anything\n> > different for the *starting* segment size. 
(Controlling *max* size\n> > does seem important, however.) For the corner case of m_w_m = 1MB,\n> > it's fine if vacuum quits pruning immediately after (in effect) it\n> > finds the DSA has gone to 2MB. It's not worth bothering with, IMO. If\n> > the memory accounting starts >1MB because we're adding the trivial\n> > size of some struct, let's just stop doing that. The segment\n> > allocations are what we care about.\n>\n> IIUC it's for work_mem, whose the minimum value is 64kB.\n>\n> >\n> > v45-0011:\n> >\n> > + /*\n> > + * max_bytes is forced to be at least 64kB, the current minimum valid\n> > + * value for the work_mem GUC.\n> > + */\n> > + max_bytes = Max(64 * 1024L, max_bytes);\n> >\n> > Why?\n>\n> This is to avoid creating a radix tree within very small memory. The\n> minimum work_mem value is a reasonable lower bound that PostgreSQL\n> uses internally. It's actually copied from tuplesort.c.\n\nThere is no explanation for why it should be done like tuplesort.c. Also...\n\n- tree->leaf_ctx = SlabContextCreate(ctx,\n- \"radix tree leaves\",\n- RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n- sizeof(RT_VALUE_TYPE));\n+ tree->leaf_ctx = SlabContextCreate(ctx,\n+ \"radix tree leaves\",\n+ Min(RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n+ work_mem),\n+ sizeof(RT_VALUE_TYPE));\n\nAt first, my eyes skipped over this apparent re-indent, but hidden\ninside here is another (undocumented) attempt to clamp the size of\nsomething. There are too many of these sprinkled in various places,\nand they're already a maintenance hazard -- a different one was left\nbehind in v45-0011:\n\n@@ -201,6 +183,7 @@ TidStoreCreate(size_t max_bytes, int max_off,\ndsa_area *area)\n ts->control->max_bytes = max_bytes - (70 * 1024);\n }\n\nLet's do it in just one place. 
In TidStoreCreate(), do\n\n/* clamp max_bytes to at least the size of the empty tree with\nallocated blocks, so it doesn't immediately appear full */\nts->control->max_bytes = Max(max_bytes, {rt, shared_rt}_memory_usage);\n\nThen we can get rid of all the worry about 1MB/2MB, 64kB, 70kB -- all that.\n\nI may not recall everything while writing this, but it seems the only\nother thing we should be clamping is the max aset block size (solved)\n/ max DSM segment size (in progress).\n\n\n", "msg_date": "Mon, 18 Dec 2023 13:41:35 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Dec 18, 2023 at 3:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Dec 15, 2023 at 3:15 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Dec 15, 2023 at 10:30 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > parallel_vacuum_init(Relation rel, Relation *indrels, int nindexes,\n> > > - int nrequested_workers, int max_items,\n> > > - int elevel, BufferAccessStrategy bstrategy)\n> > > + int nrequested_workers, int vac_work_mem,\n> > > + int max_offset, int elevel,\n> > > + BufferAccessStrategy bstrategy)\n> > >\n> > > It seems very strange to me that this function has to pass the\n> > > max_offset. In general, it's been simpler to assume we have a constant\n> > > max_offset, but in this case that fact is not helping. Something to\n> > > think about for later.\n> >\n> > max_offset was previously used in old TID encoding in tidstore. 
Since\n> > tidstore has entries for each block, I think we no longer need it.\n>\n> It's needed now to properly size the allocation of TidStoreIter which\n> contains...\n>\n> +/* Result struct for TidStoreIterateNext */\n> +typedef struct TidStoreIterResult\n> +{\n> + BlockNumber blkno;\n> + int num_offsets;\n> + OffsetNumber offsets[FLEXIBLE_ARRAY_MEMBER];\n> +} TidStoreIterResult;\n>\n> Maybe we can palloc the offset array to \"almost always\" big enough,\n> with logic to resize if needed? If not too hard, seems worth it to\n> avoid churn in the parameter list.\n\nYes, I was thinking of that.\n\n>\n> > > v45-0010:\n> > >\n> > > Thinking about this some more, I'm not sure we need to do anything\n> > > different for the *starting* segment size. (Controlling *max* size\n> > > does seem important, however.) For the corner case of m_w_m = 1MB,\n> > > it's fine if vacuum quits pruning immediately after (in effect) it\n> > > finds the DSA has gone to 2MB. It's not worth bothering with, IMO. If\n> > > the memory accounting starts >1MB because we're adding the trivial\n> > > size of some struct, let's just stop doing that. The segment\n> > > allocations are what we care about.\n> >\n> > IIUC it's for work_mem, whose the minimum value is 64kB.\n> >\n> > >\n> > > v45-0011:\n> > >\n> > > + /*\n> > > + * max_bytes is forced to be at least 64kB, the current minimum valid\n> > > + * value for the work_mem GUC.\n> > > + */\n> > > + max_bytes = Max(64 * 1024L, max_bytes);\n> > >\n> > > Why?\n> >\n> > This is to avoid creating a radix tree within very small memory. The\n> > minimum work_mem value is a reasonable lower bound that PostgreSQL\n> > uses internally. It's actually copied from tuplesort.c.\n>\n> There is no explanation for why it should be done like tuplesort.c. 
Also...\n>\n> - tree->leaf_ctx = SlabContextCreate(ctx,\n> - \"radix tree leaves\",\n> - RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n> - sizeof(RT_VALUE_TYPE));\n> + tree->leaf_ctx = SlabContextCreate(ctx,\n> + \"radix tree leaves\",\n> + Min(RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n> + work_mem),\n> + sizeof(RT_VALUE_TYPE));\n>\n> At first, my eyes skipped over this apparent re-indent, but hidden\n> inside here is another (undocumented) attempt to clamp the size of\n> something. There are too many of these sprinkled in various places,\n> and they're already a maintenance hazard -- a different one was left\n> behind in v45-0011:\n>\n> @@ -201,6 +183,7 @@ TidStoreCreate(size_t max_bytes, int max_off,\n> dsa_area *area)\n> ts->control->max_bytes = max_bytes - (70 * 1024);\n> }\n>\n> Let's do it in just one place. In TidStoreCreate(), do\n>\n> /* clamp max_bytes to at least the size of the empty tree with\n> allocated blocks, so it doesn't immediately appear full */\n> ts->control->max_bytes = Max(max_bytes, {rt, shared_rt}_memory_usage);\n>\n> Then we can get rid of all the worry about 1MB/2MB, 64kB, 70kB -- all that.\n\nBut doesn't it mean that even if we create a shared tidstore with\nsmall memory, say 64kB, it actually uses 1MB?\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 19 Dec 2023 14:37:04 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Dec 19, 2023 at 12:37 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Dec 18, 2023 at 3:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > Let's do it in just one place. 
In TidStoreCreate(), do\n> >\n> > /* clamp max_bytes to at least the size of the empty tree with\n> > allocated blocks, so it doesn't immediately appear full */\n> > ts->control->max_bytes = Max(max_bytes, {rt, shared_rt}_memory_usage);\n> >\n> > Then we can get rid of all the worry about 1MB/2MB, 64kB, 70kB -- all that.\n>\n> But doesn't it mean that even if we create a shared tidstore with\n> small memory, say 64kB, it actually uses 1MB?\n\nThis sounds like an argument for controlling the minimum DSA segment\nsize. (I'm not really in favor of that, but open to others' opinion)\n\nI wasn't talking about that above -- I was saying we should have only\none place where we clamp max_bytes so that the tree doesn't\nimmediately appear full.\n\n\n", "msg_date": "Tue, 19 Dec 2023 14:36:52 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Dec 19, 2023 at 4:37 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Dec 19, 2023 at 12:37 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Dec 18, 2023 at 3:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > Let's do it in just one place. In TidStoreCreate(), do\n> > >\n> > > /* clamp max_bytes to at least the size of the empty tree with\n> > > allocated blocks, so it doesn't immediately appear full */\n> > > ts->control->max_bytes = Max(max_bytes, {rt, shared_rt}_memory_usage);\n> > >\n> > > Then we can get rid of all the worry about 1MB/2MB, 64kB, 70kB -- all that.\n> >\n> > But doesn't it mean that even if we create a shared tidstore with\n> > small memory, say 64kB, it actually uses 1MB?\n>\n> This sounds like an argument for controlling the minimum DSA segment\n> size. 
(I'm not really in favor of that, but open to others' opinion)\n>\n> I wasn't talking about that above -- I was saying we should have only\n> one place where we clamp max_bytes so that the tree doesn't\n> immediately appear full.\n\nThank you for your clarification. Understood.\n\nI've updated the new patch set that incorporated comments I got so\nfar. 0007, 0008, and 0012 patches are updates from the v45 patch set.\nIn addition to the review comments, I made some changes in tidstore to\nmake it independent from heap. Specifically, it uses MaxOffsetNumber\ninstead of MaxHeapTuplesPerPage. Now we don't need to include\nhtup_details.h. It enlarged MaxBlocktableEntrySize but it's still 272\nbytes.\n\nBTW regarding the previous comment I got before:\n\n> - RT_PTR_ALLOC *slot;\n> + RT_PTR_ALLOC *slot = NULL;\n>\n> We have a macro for invalid pointer because of DSA.\n\nI think that since *slot is a pointer to a RT_PTR_ALLOC it's okay to set NULL.\n\nAs for the initial and maximum DSA segment sizes, I've sent a summary\non that thread:\n\nhttps://www.postgresql.org/message-id/CAD21AoCVMw6DSmgZY9h%2BxfzKtzJeqWiwxaUD2T-FztVcV-XibQ%40mail.gmail.com\n\nI'm going to update RT_DUMP() and RT_DUMP_NODE() codes for the next step.\n\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Wed, 20 Dec 2023 20:35:38 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Dec 20, 2023 at 6:36 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I've updated the new patch set that incorporated comments I got so\n> far. 0007, 0008, and 0012 patches are updates from the v45 patch set.\n> In addition to the review comments, I made some changes in tidstore to\n> make it independent from heap. Specifically, it uses MaxOffsetNumber\n> instead of MaxHeapTuplesPerPage. Now we don't need to include\n> htup_details.h. 
It enlarged MaxBlocktableEntrySize but it's still 272\n> bytes.\n\nThat's a good idea.\n\n> BTW regarding the previous comment I got before:\n>\n> > - RT_PTR_ALLOC *slot;\n> > + RT_PTR_ALLOC *slot = NULL;\n> >\n> > We have a macro for invalid pointer because of DSA.\n>\n> I think that since *slot is a pointer to a RT_PTR_ALLOC it's okay to set NULL.\n\nAh right, it's the address of the slot.\n\n> I'm going to update RT_DUMP() and RT_DUMP_NODE() codes for the next step.\n\nThat could probably use some discussion. A few months ago, I found the\ndebugging functions only worked when everything else worked. When\nthings weren't working, I had to rip one of these functions apart so\nit only looked at one node. If something is broken, we can't count on\nrecursion or iteration working, because we won't get that far. I don't\nremember how things are in the current patch.\n\nI've finished the node shrinking and addressed some fixme/todo areas\n-- can I share these and squash your v46 changes first?\n\n\n", "msg_date": "Thu, 21 Dec 2023 08:19:44 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 21, 2023 at 10:19 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Dec 20, 2023 at 6:36 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I've updated the new patch set that incorporated comments I got so\n> > far. 0007, 0008, and 0012 patches are updates from the v45 patch set.\n> > In addition to the review comments, I made some changes in tidstore to\n> > make it independent from heap. Specifically, it uses MaxOffsetNumber\n> > instead of MaxHeapTuplesPerPage. Now we don't need to include\n> > htup_details.h. 
It enlarged MaxBlocktableEntrySize but it's still 272\n> > bytes.\n>\n> That's a good idea.\n>\n> > BTW regarding the previous comment I got before:\n> >\n> > > - RT_PTR_ALLOC *slot;\n> > > + RT_PTR_ALLOC *slot = NULL;\n> > >\n> > > We have a macro for invalid pointer because of DSA.\n> >\n> > I think that since *slot is a pointer to a RT_PTR_ALLOC it's okay to set NULL.\n>\n> Ah right, it's the address of the slot.\n>\n> > I'm going to update RT_DUMP() and RT_DUMP_NODE() codes for the next step.\n>\n> That could probably use some discussion. A few months ago, I found the\n> debugging functions only worked when everything else worked. When\n> things weren't working, I had to rip one of these functions apart so\n> it only looked at one node. If something is broken, we can't count on\n> recursion or iteration working, because we won't get that far. I don't\n> remember how things are in the current patch.\n\nAgreed.\n\nI found the following comment and wanted to discuss:\n\n// this might be better as \"iterate over nodes\", plus a callback to\nRT_DUMP_NODE,\n// which should really only concern itself with single nodes\nRT_SCOPE void\nRT_DUMP(RT_RADIX_TREE *tree)\n\nIf it means we need to somehow use the iteration functions also for\ndumping the whole tree, it would probably need to refactor the\niteration codes so that the RT_DUMP() can use them while dumping\nvisited nodes. But we need to be careful of not adding overheads to\nthe iteration performance.\n\n>\n> I've finished the node shrinking and addressed some fixme/todo areas\n> -- can I share these and squash your v46 changes first?\n\nCool! 
Yes, please do so.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 21 Dec 2023 10:32:37 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 21, 2023 at 8:33 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I found the following comment and wanted to discuss:\n>\n> // this might be better as \"iterate over nodes\", plus a callback to\n> RT_DUMP_NODE,\n> // which should really only concern itself with single nodes\n> RT_SCOPE void\n> RT_DUMP(RT_RADIX_TREE *tree)\n>\n> If it means we need to somehow use the iteration functions also for\n> dumping the whole tree, it would probably need to refactor the\n> iteration codes so that the RT_DUMP() can use them while dumping\n> visited nodes. But we need to be careful of not adding overheads to\n> the iteration performance.\n\nYeah, some months ago I thought a callback interface would make some\nthings easier. I don't think we need that at the moment (possibly\nnever), so that comment can be just removed. As far as these debug\nfunctions, I only found useful the stats and dumping a single node,\nFWIW.\n\nI've attached v47, which is v46 plus some fixes for radix tree.\n\n0004 - moves everything for \"delete\" to the end -- gradually other\nthings will be grouped together in a sensible order\n\n0005 - trivial\n\n0006 - shrink nodes -- still needs testing, but nothing crashes yet.\nThis shows some renaming might be good: Previously we had\nRT_CHUNK_CHILDREN_ARRAY_COPY for growing nodes, but for shrinking I've\nadded RT_COPY_ARRAYS_AND_DELETE, since the deletion happens by simply\nnot copying the slot to be deleted. This means when growing it would\nbe more clear to call the former RT_COPY_ARRAYS_FOR_INSERT, since that\nreserves a new slot for the caller in the new node, but the caller\nmust do the insert itself. 
Note that there are some practical\nrestrictions/best-practices on whether shrinking should happen after\ndeletion or vice versa. Hopefully it's clear, but let me know if the\ndescription can be improved. Also, it doesn't yet shrink from size\nclass 32 to 16, but it could with a bit of work.\n\n0007 - trivial, but could use a better comment. I also need to make\nsure stats reporting works (may also need some cleanup work).\n\n0008 - fixes RT_FREE_RECURSE -- I believe you wondered some months ago\nif DSA could just free all our allocated segments without throwing\naway the DSA, and that's still a good question.\n\n0009 - fixes the assert in RT_ITER_SET_NODE_FROM (btw, I don't think\nthis name is better than RT_UPDATE_ITER_STACK, so maybe we should go\nback to that). The assert doesn't fire, so I guess it does what it's\nsupposed to? For me, the iteration logic is still the most confusing\npiece out of the whole radix tree. Maybe that could be helped with\nsome better variable names, but I wonder if it needs more invasive\nwork. I confess I don't have better ideas for how it would work\ndifferently.\n\n0010 - some fixes for number of children accounting in node256\n\n0011 - Long overdue pgindent of radixtree.h, without trying to fix up\nafterwards. Feel free to throw out and redo if this interferes with\nongoing work.\n\nThe rest are from your v46. The bench doesn't work for tid store\nanymore, so I squashed \"disable bench for CI\" until we get back to\nthat. Some more review comments (note: patch numbers are for v47, but\nI changed nothing from v46 in this area):\n\n0013:\n\n+ * Internally, a tid is encoded as a pair of 64-bit key and 64-bit value,\n+ * and stored in the radix tree.\n\nRecently outdated. The variable length values seems to work, so let's\nmake everything match.\n\n+#define MAX_TUPLES_PER_PAGE MaxOffsetNumber\n\nMaybe we don't need this macro anymore? 
The name no longer fits, in any case.\n\n+TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets,\n+ int num_offsets)\n+{\n+ char buf[MaxBlocktableEntrySize];\n+ BlocktableEntry *page = (BlocktableEntry *) buf;\n\nI'm not sure this is safe with alignment. Maybe rather than plain\n\"char\", it needs to be a union with BlocktableEntry, or something.\n\n+static inline BlocktableEntry *\n+tidstore_iter_kv(TidStoreIter *iter, uint64 *key)\n+{\n+ if (TidStoreIsShared(iter->ts))\n+ return shared_rt_iterate_next(iter->tree_iter.shared, key);\n+\n+ return local_rt_iterate_next(iter->tree_iter.local, key);\n+}\n\nIn the old encoding scheme, this function did something important, but\nnow it's a useless wrapper with one caller.\n\n+ /*\n+ * In the shared case, TidStoreControl and radix_tree are backed by the\n+ * same DSA area and rt_memory_usage() returns the value including both.\n+ * So we don't need to add the size of TidStoreControl separately.\n+ */\n+ if (TidStoreIsShared(ts))\n+ return sizeof(TidStore) + shared_rt_memory_usage(ts->tree.shared);\n+\n+ return sizeof(TidStore) + sizeof(TidStore) +\nlocal_rt_memory_usage(ts->tree.local);\n\nI don't see the point in including these tiny structs, since we will\nalways blow past the limit by a number of kilobytes (at least, often\nmegabytes or more) at the time it happens.\n\n+ iter->output.max_offset = 64;\n\nMaybe needs a comment that this is just some starting size and not\nanything particular.\n\n+ iter->output.offsets = palloc(sizeof(OffsetNumber) * iter->output.max_offset);\n\n+ /* Make sure there is enough space to add offsets */\n+ if (result->num_offsets + bmw_popcount(w) > result->max_offset)\n+ {\n+ result->max_offset *= 2;\n+ result->offsets = repalloc(result->offsets,\n+ sizeof(OffsetNumber) * result->max_offset);\n+ }\n\npopcount()-ing for every array element in every value is expensive --\nlet's just add sizeof(bitmapword). 
It's not that wasteful, but then\nthe initial max will need to be 128.\n\nAbout separation of responsibilities for locking: The only thing\ncurrently where the tid store is not locked is tree iteration. That's\na strange exception. Also, we've recently made RT_FIND return a\npointer, so the caller must somehow hold a share lock, but I think we\nhaven't exposed callers the ability to do that, and we rely on the tid\nstore lock for that. We have a mix of tree locking and tid store\nlocking. We will need to consider carefully how to make this more\nclear, maintainable, and understandable.\n\n0015:\n\n\"XXX: some regression test fails since this commit changes the minimum\nm_w_m to 2048 from 1024. This was necessary for the pervious memory\"\n\nThis shouldn't fail anymore if the \"one-place\" clamp was in a patch\nbefore this. If so, lets take out that GUC change and worry about\nmin/max size separately. If it still fails, I'd like to know why.\n\n- * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in the\n- * vacrel->dead_items array.\n+ * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in\nthe TID store.\n\nWhat I was getting at earlier is that the first line here doesn't\nreally need to change, we can just s/array/store/ ?\n\n-static int\n-lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,\n- int index, Buffer vmbuffer)\n+static void\n+lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno,\n+ OffsetNumber *deadoffsets,\nint num_offsets, Buffer buffer,\n+ Buffer vmbuffer)\n\n\"buffer\" should still come after \"blkno\", so that line doesn't need to change.\n\n$ git diff master -- src/backend/access/heap/ | grep has_lpdead_items\n- bool has_lpdead_items; /* includes existing LP_DEAD items */\n- * pruning and freezing. 
all_visible implies !has_lpdead_items, but don't\n- Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);\n- if (prunestate.has_lpdead_items)\n- else if (prunestate.has_lpdead_items && PageIsAllVisible(page))\n- if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)\n- prunestate->has_lpdead_items = false;\n- prunestate->has_lpdead_itemshas_lpdead_itemshas_lpdead_itemshas_lpdead_items\n= true;\n\nIn a green field, it'd be fine to replace these with an expression of\n\"num_offsets\", but it adds a bit of noise for reviewers and the git\nlog. Is it really necessary?\n\n- deadoffsets[lpdead_items++] = offnum;\n+\nprunestate->deadoffsets[prunestate->num_offsets++] = offnum;\n\n I'm also not quite sure why \"deadoffsets\" and \"lpdead_items\" got\nmoved to the PruneState. The latter was renamed in a way that makes\nmore sense, but I don't see why the churn is necessary.\n\n@@ -1875,28 +1882,9 @@ lazy_scan_prune(LVRelState *vacrel,\n }\n #endif\n\n- /*\n- * Now save details of the LP_DEAD items from the page in vacrel\n- */\n- if (lpdead_items > 0)\n+ if (prunestate->num_offsets > 0)\n {\n- VacDeadItems *dead_items = vacrel->dead_items;\n- ItemPointerData tmp;\n-\n vacrel->lpdead_item_pages++;\n- prunestate->has_lpdead_items = true;\n-\n- ItemPointerSetBlockNumber(&tmp, blkno);\n-\n- for (int i = 0; i < lpdead_items; i++)\n- {\n- ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);\n- dead_items->items[dead_items->num_items++] = tmp;\n- }\n-\n- Assert(dead_items->num_items <= dead_items->max_items);\n- pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,\n-\n dead_items->num_items);\n\nI don't understand why this block got removed and nothing new is\nadding anything to the tid store.\n\n@@ -1087,7 +1088,16 @@ lazy_scan_heap(LVRelState *vacrel)\n * with prunestate-driven visibility map and\nFSM steps (just like\n * the two-pass strategy).\n */\n- Assert(dead_items->num_items == 0);\n+ Assert(TidStoreNumTids(dead_items) == 0);\n+ }\n+ else if 
(prunestate.num_offsets > 0)\n+ {\n+ /* Save details of the LP_DEAD items from the\npage in dead_items */\n+ TidStoreSetBlockOffsets(dead_items, blkno,\nprunestate.deadoffsets,\n+\n prunestate.num_offsets);\n+\n+\npgstat_progress_update_param(PROGRESS_VACUUM_DEAD_TUPLE_BYTES,\n+\n TidStoreMemoryUsage(dead_items));\n\nI guess it was added here, 800 lines away? If so, why?\n\nAbout progress reporting: I want to make sure no one is going to miss\ncounting \"num_dead_tuples\". It's no longer relevant for the number of\nindex scans we need to do, but do admins still have a use for it?\nSomething to think about later.\n\n0017\n\n+ /*\n+ * max_bytes is forced to be at least 64kB, the current minimum valid\n+ * value for the work_mem GUC.\n+ */\n+ max_bytes = Max(64 * 1024L, max_bytes);\n\nIf this still needs to be here, I still don't understand why.", "msg_date": "Thu, 21 Dec 2023 14:41:37 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2023-12-21 14:41:37 +0700, John Naylor wrote:\n> I've attached v47, which is v46 plus some fixes for radix tree.\n\nCould either of you summarize what the design changes you've made in the last\nmonths are and why you've done them? Unfortunately this thread is very long,\nand the comments in the file just say \"FIXME\" in places that apparently are\naffected by design changes. This makes it hard to catch up here.\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Thu, 21 Dec 2023 03:27:41 -0800", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 21, 2023 at 6:27 PM Andres Freund <andres@anarazel.de> wrote:\n>\n> Could either of you summarize what the design changes you've made in the last\n> months are and why you've done them? 
Unfortunately this thread is very long,\n> and the comments in the file just say \"FIXME\" in places that apparently are\n> affected by design changes. This makes it hard to catch up here.\n\nI'd be happy to try, since we are about due for a summary. I was also\nhoping to reach a coherent-enough state sometime in early January to\nrequest your feedback, so good timing. Not sure how much detail to go\ninto, but here goes:\n\nBack in May [1], the method of value storage shifted towards \"combined\npointer-value slots\", which was described and recommended in the\npaper. There were some other changes for simplicity and efficiency,\nbut none as far-reaching as this.\n\nThis is enabled by using the template architecture that we adopted\nlong ago for different reasons. Fixed length values are either stored\nin the slot of the last-level node (if the value fits into the\nplatform's pointer), or are a \"single-value\" leaf (otherwise).\n\nFor tid store, we want to eventually support bitmap heap scans (in\naddition to vacuum), and in doing so make it independent of heap AM.\nThat means value types similar to PageTableEntry tidbitmap.c, but with\na variable number of bitmapwords.\n\nThat required radix tree to support variable length values. That has\nbeen the main focus in the last several months, and it basically works\nnow.\n\nTo my mind, the biggest architectural issues in the patch today are:\n\n- Variable-length values means that pointers are passed around in\nplaces. This will require some shifting responsibility for locking to\nthe caller, or longer-term maybe a callback interface. (This is new,\nthe below are pre-existing issues.)\n- The tid store has its own \"control object\" (when shared memory is\nneeded) with its own lock, in addition to the same for the associated\nradix tree. This leads to unnecessary double-locking. This area needs\nsome attention.\n- Memory accounting is still unsettled. 
The current thinking is to cap\nmax block/segment size, scaled to a fraction of m_w_m, but there are\nstill open questions.\n\nThere has been some recent effort toward finishing work started\nearlier, like shrinking nodes. There a couple places that can still\nuse either simplification or optimization, but otherwise work fine.\nMost of the remaining fixmes/todos/wips are trivial; a few are\nactually outdated now that I look again, and will be removed shortly.\nThe regression tests could use some tidying up.\n\n-John\n\n[1] https://www.postgresql.org/message-id/CAFBsxsFyWLxweHVDtKb7otOCR4XdQGYR4b%2B9svxpVFnJs08BmQ%40mail.gmail.com\n\n\n", "msg_date": "Fri, 22 Dec 2023 21:09:33 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Dec 21, 2023 at 4:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Dec 21, 2023 at 8:33 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I found the following comment and wanted to discuss:\n> >\n> > // this might be better as \"iterate over nodes\", plus a callback to\n> > RT_DUMP_NODE,\n> > // which should really only concern itself with single nodes\n> > RT_SCOPE void\n> > RT_DUMP(RT_RADIX_TREE *tree)\n> >\n> > If it means we need to somehow use the iteration functions also for\n> > dumping the whole tree, it would probably need to refactor the\n> > iteration codes so that the RT_DUMP() can use them while dumping\n> > visited nodes. But we need to be careful of not adding overheads to\n> > the iteration performance.\n>\n> Yeah, some months ago I thought a callback interface would make some\n> things easier. I don't think we need that at the moment (possibly\n> never), so that comment can be just removed. 
As far as these debug\n> functions, I only found useful the stats and dumping a single node,\n> FWIW.\n>\n> I've attached v47, which is v46 plus some fixes for radix tree.\n>\n> 0004 - moves everything for \"delete\" to the end -- gradually other\n> things will be grouped together in a sensible order\n>\n> 0005 - trivial\n\nLGTM.\n\n>\n> 0006 - shrink nodes -- still needs testing, but nothing crashes yet.\n\nCool. The coverage test results showed the shrink codes are also covered.\n\n> This shows some renaming might be good: Previously we had\n> RT_CHUNK_CHILDREN_ARRAY_COPY for growing nodes, but for shrinking I've\n> added RT_COPY_ARRAYS_AND_DELETE, since the deletion happens by simply\n> not copying the slot to be deleted. This means when growing it would\n> be more clear to call the former RT_COPY_ARRAYS_FOR_INSERT, since that\n> reserves a new slot for the caller in the new node, but the caller\n> must do the insert itself.\n\nAgreed.\n\n> Note that there are some practical\n> restrictions/best-practices on whether shrinking should happen after\n> deletion or vice versa. Hopefully it's clear, but let me know if the\n> description can be improved. Also, it doesn't yet shrink from size\n> class 32 to 16, but it could with a bit of work.\n\nSounds reasonable.\n\n>\n> 0007 - trivial, but could use a better comment. I also need to make\n> sure stats reporting works (may also need some cleanup work).\n>\n> 0008 - fixes RT_FREE_RECURSE -- I believe you wondered some months ago\n> if DSA could just free all our allocated segments without throwing\n> away the DSA, and that's still a good question.\n\nLGTM.\n\n>\n> 0009 - fixes the assert in RT_ITER_SET_NODE_FROM (btw, I don't think\n> this name is better than RT_UPDATE_ITER_STACK, so maybe we should go\n> back to that).\n\nWill rename it.\n\n> The assert doesn't fire, so I guess it does what it's\n> supposed to?\n\nYes.\n\n> For me, the iteration logic is still the most confusing\n> piece out of the whole radix tree. 
Maybe that could be helped with\n> some better variable names, but I wonder if it needs more invasive\n> work.\n\nTrue. Maybe more comments would also help.\n\n>\n> 0010 - some fixes for number of children accounting in node256\n>\n> 0011 - Long overdue pgindent of radixtree.h, without trying to fix up\n> afterwards. Feel free to throw out and redo if this interferes with\n> ongoing work.\n>\n\nLGTM.\n\nI'm working on the below review comments and most of them are already\nincorporated on the local branch:\n\n> The rest are from your v46. The bench doesn't work for tid store\n> anymore, so I squashed \"disable bench for CI\" until we get back to\n> that. Some more review comments (note: patch numbers are for v47, but\n> I changed nothing from v46 in this area):\n>\n> 0013:\n>\n> + * Internally, a tid is encoded as a pair of 64-bit key and 64-bit value,\n> + * and stored in the radix tree.\n>\n> Recently outdated. The variable length values seems to work, so let's\n> make everything match.\n>\n> +#define MAX_TUPLES_PER_PAGE MaxOffsetNumber\n>\n> Maybe we don't need this macro anymore? The name no longer fits, in any case.\n\nRemoved.\n\n>\n> +TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets,\n> + int num_offsets)\n> +{\n> + char buf[MaxBlocktableEntrySize];\n> + BlocktableEntry *page = (BlocktableEntry *) buf;\n>\n> I'm not sure this is safe with alignment. 
Maybe rather than plain\n> \"char\", it needs to be a union with BlocktableEntry, or something.\n\nI tried it in the new patch set but could you explain why it could not\nbe safe with alignment?\n\n>\n> +static inline BlocktableEntry *\n> +tidstore_iter_kv(TidStoreIter *iter, uint64 *key)\n> +{\n> + if (TidStoreIsShared(iter->ts))\n> + return shared_rt_iterate_next(iter->tree_iter.shared, key);\n> +\n> + return local_rt_iterate_next(iter->tree_iter.local, key);\n> +}\n>\n> In the old encoding scheme, this function did something important, but\n> now it's a useless wrapper with one caller.\n\nRemoved.\n\n>\n> + /*\n> + * In the shared case, TidStoreControl and radix_tree are backed by the\n> + * same DSA area and rt_memory_usage() returns the value including both.\n> + * So we don't need to add the size of TidStoreControl separately.\n> + */\n> + if (TidStoreIsShared(ts))\n> + return sizeof(TidStore) + shared_rt_memory_usage(ts->tree.shared);\n> +\n> + return sizeof(TidStore) + sizeof(TidStore) +\n> local_rt_memory_usage(ts->tree.local);\n>\n> I don't see the point in including these tiny structs, since we will\n> always blow past the limit by a number of kilobytes (at least, often\n> megabytes or more) at the time it happens.\n\nAgreed, removed.\n\n>\n> + iter->output.max_offset = 64;\n>\n> Maybe needs a comment that this is just some starting size and not\n> anything particular.\n>\n> + iter->output.offsets = palloc(sizeof(OffsetNumber) * iter->output.max_offset);\n>\n> + /* Make sure there is enough space to add offsets */\n> + if (result->num_offsets + bmw_popcount(w) > result->max_offset)\n> + {\n> + result->max_offset *= 2;\n> + result->offsets = repalloc(result->offsets,\n> + sizeof(OffsetNumber) * result->max_offset);\n> + }\n>\n> popcount()-ing for every array element in every value is expensive --\n> let's just add sizeof(bitmapword). 
It's not that wasteful, but then\n> the initial max will need to be 128.\n\nGood idea.\n\n>\n> About separation of responsibilities for locking: The only thing\n> currently where the tid store is not locked is tree iteration. That's\n> a strange exception. Also, we've recently made RT_FIND return a\n> pointer, so the caller must somehow hold a share lock, but I think we\n> haven't exposed callers the ability to do that, and we rely on the tid\n> store lock for that. We have a mix of tree locking and tid store\n> locking. We will need to consider carefully how to make this more\n> clear, maintainable, and understandable.\n\nYes, tidstore should be locked during the iteration.\n\nOne simple direction about locking is that the radix tree has the lock\nbut no APIs hold/release it. It's the caller's responsibility. If a\ndata structure using a radix tree for its storage has its own lock\n(like tidstore), it can use it instead of the radix tree's one. A\ndownside would be that it's probably hard to support a better locking\nalgorithm such as ROWEX in the radix tree. Another variant of APIs\nthat also does locking/unlocking within APIs might help.\n\n>\n> 0015:\n>\n> \"XXX: some regression test fails since this commit changes the minimum\n> m_w_m to 2048 from 1024. This was necessary for the pervious memory\"\n>\n> This shouldn't fail anymore if the \"one-place\" clamp was in a patch\n> before this. If so, lets take out that GUC change and worry about\n> min/max size separately. 
If it still fails, I'd like to know why.\n\nAgreed.\n\n>\n> - * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in the\n> - * vacrel->dead_items array.\n> + * lazy_vacuum_heap_page() -- free page's LP_DEAD items listed in\n> the TID store.\n>\n> What I was getting at earlier is that the first line here doesn't\n> really need to change, we can just s/array/store/ ?\n\nFixed.\n\n>\n> -static int\n> -lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,\n> - int index, Buffer vmbuffer)\n> +static void\n> +lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno,\n> + OffsetNumber *deadoffsets,\n> int num_offsets, Buffer buffer,\n> + Buffer vmbuffer)\n>\n> \"buffer\" should still come after \"blkno\", so that line doesn't need to change.\n\nFixed.\n\n>\n> $ git diff master -- src/backend/access/heap/ | grep has_lpdead_items\n> - bool has_lpdead_items; /* includes existing LP_DEAD items */\n> - * pruning and freezing. all_visible implies !has_lpdead_items, but don't\n> - Assert(!prunestate.all_visible || !prunestate.has_lpdead_items);\n> - if (prunestate.has_lpdead_items)\n> - else if (prunestate.has_lpdead_items && PageIsAllVisible(page))\n> - if (prunestate.has_lpdead_items && vacrel->do_index_vacuuming)\n> - prunestate->has_lpdead_items = false;\n> - prunestate->has_lpdead_itemshas_lpdead_itemshas_lpdead_itemshas_lpdead_items\n> = true;\n>\n> In a green field, it'd be fine to replace these with an expression of\n> \"num_offsets\", but it adds a bit of noise for reviewers and the git\n> log. Is it really necessary?\n\nI see your point. I think we can live with having both\nhas_lpdead_items and num_offsets. But we will have to check if these\nvalues are consistent, which could be less maintainable.\n\n>\n> - deadoffsets[lpdead_items++] = offnum;\n> +\n> prunestate->deadoffsets[prunestate->num_offsets++] = offnum;\n>\n> I'm also not quite sure why \"deadoffsets\" and \"lpdead_items\" got\n> moved to the PruneState. 
The latter was renamed in a way that makes\n> more sense, but I don't see why the churn is necessary.\n>\n> @@ -1875,28 +1882,9 @@ lazy_scan_prune(LVRelState *vacrel,\n> }\n> #endif\n>\n> - /*\n> - * Now save details of the LP_DEAD items from the page in vacrel\n> - */\n> - if (lpdead_items > 0)\n> + if (prunestate->num_offsets > 0)\n> {\n> - VacDeadItems *dead_items = vacrel->dead_items;\n> - ItemPointerData tmp;\n> -\n> vacrel->lpdead_item_pages++;\n> - prunestate->has_lpdead_items = true;\n> -\n> - ItemPointerSetBlockNumber(&tmp, blkno);\n> -\n> - for (int i = 0; i < lpdead_items; i++)\n> - {\n> - ItemPointerSetOffsetNumber(&tmp, deadoffsets[i]);\n> - dead_items->items[dead_items->num_items++] = tmp;\n> - }\n> -\n> - Assert(dead_items->num_items <= dead_items->max_items);\n> - pgstat_progress_update_param(PROGRESS_VACUUM_NUM_DEAD_TUPLES,\n> -\n> dead_items->num_items);\n>\n> I don't understand why this block got removed and nothing new is\n> adding anything to the tid store.\n>\n> @@ -1087,7 +1088,16 @@ lazy_scan_heap(LVRelState *vacrel)\n> * with prunestate-driven visibility map and\n> FSM steps (just like\n> * the two-pass strategy).\n> */\n> - Assert(dead_items->num_items == 0);\n> + Assert(TidStoreNumTids(dead_items) == 0);\n> + }\n> + else if (prunestate.num_offsets > 0)\n> + {\n> + /* Save details of the LP_DEAD items from the\n> page in dead_items */\n> + TidStoreSetBlockOffsets(dead_items, blkno,\n> prunestate.deadoffsets,\n> +\n> prunestate.num_offsets);\n> +\n> +\n> pgstat_progress_update_param(PROGRESS_VACUUM_DEAD_TUPLE_BYTES,\n> +\n> TidStoreMemoryUsage(dead_items));\n>\n> I guess it was added here, 800 lines away? If so, why?\n\nThe above changes are related. The idea is not to use tidstore in a\none-pass strategy. If the table doesn't have any indexes, in\nlazy_scan_prune() we collect offset numbers of dead tuples on the page\nand vacuum the page using them. 
In this case, we don't need to use\ntidstore so we pass the offsets array to lazy_vacuum_heap_page(). The\nLVPagePruneState is a convenient place to store collected offset\nnumbers.\n\n>\n> About progress reporting: I want to make sure no one is going to miss\n> counting \"num_dead_tuples\". It's no longer relevant for the number of\n> index scans we need to do, but do admins still have a use for it?\n> Something to think about later.\n\nI'm not sure if the user will still need num_dead_tuples in progress\nreporting view. The total number of dead tuples might be useful but\nthe verbose log already shows that.\n\n>\n> 0017\n>\n> + /*\n> + * max_bytes is forced to be at least 64kB, the current minimum valid\n> + * value for the work_mem GUC.\n> + */\n> + max_bytes = Max(64 * 1024L, max_bytes);\n>\n> If this still needs to be here, I still don't understand why.\n\nRemoved.\n\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 26 Dec 2023 14:42:24 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Dec 26, 2023 at 12:43 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Dec 21, 2023 at 4:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > +TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets,\n> > + int num_offsets)\n> > +{\n> > + char buf[MaxBlocktableEntrySize];\n> > + BlocktableEntry *page = (BlocktableEntry *) buf;\n> >\n> > I'm not sure this is safe with alignment. Maybe rather than plain\n> > \"char\", it needs to be a union with BlocktableEntry, or something.\n>\n> I tried it in the new patch set but could you explain why it could not\n> be safe with alignment?\n\nI was thinking because \"buf\" is just an array of bytes. 
But, since the\nnext declaration is a cast to a pointer to the actual type, maybe we\ncan rely on the compiler to do the right thing. (It seems to on my\nmachine in any case)\n\n> > About separation of responsibilities for locking: The only thing\n> > currently where the tid store is not locked is tree iteration. That's\n> > a strange exception. Also, we've recently made RT_FIND return a\n> > pointer, so the caller must somehow hold a share lock, but I think we\n> > haven't exposed callers the ability to do that, and we rely on the tid\n> > store lock for that. We have a mix of tree locking and tid store\n> > locking. We will need to consider carefully how to make this more\n> > clear, maintainable, and understandable.\n>\n> Yes, tidstore should be locked during the iteration.\n>\n> One simple direction about locking is that the radix tree has the lock\n> but no APIs hold/release it. It's the caller's responsibility. If a\n> data structure using a radix tree for its storage has its own lock\n> (like tidstore), it can use it instead of the radix tree's one. A\n\nIt looks like the only reason tidstore has its own lock is because it\nhas no way to delegate locking to the tree's lock. Instead of working\naround the limitations of the thing we've designed, let's make it work\nfor the one use case we have. I think we need to expose RT_LOCK_*\nfunctions to the outside, and have tid store use them. That would\nallow us to simplify all those \"if (TidStoreIsShared(ts)\nLWLockAcquire(..., ...)\" calls, which are complex and often redundant.\n\nAt some point, we'll probably want to keep locking inside, at least to\nsmooth the way for fine-grained locking you mentioned.\n\n> > In a green field, it'd be fine to replace these with an expression of\n> > \"num_offsets\", but it adds a bit of noise for reviewers and the git\n> > log. Is it really necessary?\n>\n> I see your point. I think we can live with having both\n> has_lpdead_items and num_offsets. 
But we will have to check if these\n> values are consistent, which could be less maintainable.\n\nIt would be clearer if that removal was split out into a separate patch.\n\n> > I'm also not quite sure why \"deadoffsets\" and \"lpdead_items\" got\n> > moved to the PruneState. The latter was renamed in a way that makes\n> > more sense, but I don't see why the churn is necessary.\n...\n> > I guess it was added here, 800 lines away? If so, why?\n>\n> The above changes are related. The idea is not to use tidstore in a\n> one-pass strategy. If the table doesn't have any indexes, in\n> lazy_scan_prune() we collect offset numbers of dead tuples on the page\n> and vacuum the page using them. In this case, we don't need to use\n> tidstore so we pass the offsets array to lazy_vacuum_heap_page(). The\n> LVPagePruneState is a convenient place to store collected offset\n> numbers.\n\nOkay, that makes sense, but if it was ever explained, I don't\nremember, and there is nothing in the commit message either.\n\nI'm not sure this can be split up easily, but if so it might help reviewing.\n\nThis change also leads to a weird-looking control flow:\n\nif (vacrel->nindexes == 0)\n{\n if (prunestate.num_offsets > 0)\n {\n ...\n }\n}\nelse if (prunestate.num_offsets > 0)\n{\n ...\n}\n\n\n", "msg_date": "Wed, 27 Dec 2023 10:07:48 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Dec 27, 2023 at 12:08 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Dec 26, 2023 at 12:43 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Dec 21, 2023 at 4:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > +TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets,\n> > > + int num_offsets)\n> > > +{\n> > > + char buf[MaxBlocktableEntrySize];\n> > > + BlocktableEntry *page = (BlocktableEntry *) buf;\n> > >\n> > > I'm 
not sure this is safe with alignment. Maybe rather than plain\n> > > \"char\", it needs to be a union with BlocktableEntry, or something.\n> >\n> > I tried it in the new patch set but could you explain why it could not\n> > be safe with alignment?\n>\n> I was thinking because \"buf\" is just an array of bytes. But, since the\n> next declaration is a cast to a pointer to the actual type, maybe we\n> can rely on the compiler to do the right thing. (It seems to on my\n> machine in any case)\n\nOkay, I kept it.\n\n>\n> > > About separation of responsibilities for locking: The only thing\n> > > currently where the tid store is not locked is tree iteration. That's\n> > > a strange exception. Also, we've recently made RT_FIND return a\n> > > pointer, so the caller must somehow hold a share lock, but I think we\n> > > haven't exposed callers the ability to do that, and we rely on the tid\n> > > store lock for that. We have a mix of tree locking and tid store\n> > > locking. We will need to consider carefully how to make this more\n> > > clear, maintainable, and understandable.\n> >\n> > Yes, tidstore should be locked during the iteration.\n> >\n> > One simple direction about locking is that the radix tree has the lock\n> > but no APIs hold/release it. It's the caller's responsibility. If a\n> > data structure using a radix tree for its storage has its own lock\n> > (like tidstore), it can use it instead of the radix tree's one. A\n>\n> It looks like the only reason tidstore has its own lock is because it\n> has no way to delegate locking to the tree's lock. Instead of working\n> around the limitations of the thing we've designed, let's make it work\n> for the one use case we have. I think we need to expose RT_LOCK_*\n> functions to the outside, and have tid store use them. 
That would\n> allow us to simplify all those \"if (TidStoreIsShared(ts)\n> LWLockAcquire(..., ...)\" calls, which are complex and often redundant.\n\nI agree that we expose RT_LOCK_* functions and have tidstore use them,\nbut am not sure the if (TidStoreIsShared(ts) LWLockAcquire(..., ...)\"\ncalls part. I think that even if we expose them, we will still need to\ndo something like \"if (TidStoreIsShared(ts))\nshared_rt_lock_share(ts->tree.shared)\", no?\n\n>\n> At some point, we'll probably want to keep locking inside, at least to\n> smooth the way for fine-grained locking you mentioned.\n>\n> > > In a green field, it'd be fine to replace these with an expression of\n> > > \"num_offsets\", but it adds a bit of noise for reviewers and the git\n> > > log. Is it really necessary?\n> >\n> > I see your point. I think we can live with having both\n> > has_lpdead_items and num_offsets. But we will have to check if these\n> > values are consistent, which could be less maintainable.\n>\n> It would be clearer if that removal was split out into a separate patch.\n\nAgreed.\n\n>\n> > > I'm also not quite sure why \"deadoffsets\" and \"lpdead_items\" got\n> > > moved to the PruneState. The latter was renamed in a way that makes\n> > > more sense, but I don't see why the churn is necessary.\n> ...\n> > > I guess it was added here, 800 lines away? If so, why?\n> >\n> > The above changes are related. The idea is not to use tidstore in a\n> > one-pass strategy. If the table doesn't have any indexes, in\n> > lazy_scan_prune() we collect offset numbers of dead tuples on the page\n> > and vacuum the page using them. In this case, we don't need to use\n> > tidstore so we pass the offsets array to lazy_vacuum_heap_page(). 
The\n> > LVPagePruneState is a convenient place to store collected offset\n> > numbers.\n>\n> Okay, that makes sense, but if it was ever explained, I don't\n> remember, and there is nothing in the commit message either.\n>\n> I'm not sure this can be split up easily, but if so it might help reviewing.\n\nAgreed.\n\n>\n> This change also leads to a weird-looking control flow:\n>\n> if (vacrel->nindexes == 0)\n> {\n> if (prunestate.num_offsets > 0)\n> {\n> ...\n> }\n> }\n> else if (prunestate.num_offsets > 0)\n> {\n> ...\n> }\n\nFixed.\n\nI've attached a new patch set. From v47 patch, I've merged your\nchanges for radix tree, and split the vacuum integration patch into 3\npatches: simply replaces VacDeadItems with TidsTore (0007 patch), and\nuse a simple TID array for one-pass strategy (0008 patch), and replace\nhas_lpdead_items with \"num_offsets > 0\" (0009 patch), while\nincorporating your review comments on the vacuum integration patch\n(sorry for making it difficult to see the changes from v47 patch).\n0013 to 0015 patches are also updates from v47 patch.\n\nI'm thinking that we should change the order of the patches so that\ntidstore patch requires the patch for changing DSA segment sizes. That\nway, we can remove the complex max memory calculation part that we no\nlonger use from the tidstore patch.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 2 Jan 2024 22:01:07 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 2, 2024 at 8:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> I agree that we expose RT_LOCK_* functions and have tidstore use them,\n> but am not sure the if (TidStoreIsShared(ts) LWLockAcquire(..., ...)\"\n> calls part. 
I think that even if we expose them, we will still need to\n> do something like \"if (TidStoreIsShared(ts))\n> shared_rt_lock_share(ts->tree.shared)\", no?\n\nI'll come back to this topic separately.\n\n> I've attached a new patch set. From v47 patch, I've merged your\n> changes for radix tree, and split the vacuum integration patch into 3\n> patches: simply replaces VacDeadItems with TidsTore (0007 patch), and\n> use a simple TID array for one-pass strategy (0008 patch), and replace\n> has_lpdead_items with \"num_offsets > 0\" (0009 patch), while\n> incorporating your review comments on the vacuum integration patch\n\nNice!\n\n> (sorry for making it difficult to see the changes from v47 patch).\n\nIt's actually pretty clear. I just have a couple comments before\nsharing my latest cleanups:\n\n(diff'ing between v47 and v48):\n\n-- /*\n- * In the shared case, TidStoreControl and radix_tree are backed by the\n- * same DSA area and rt_memory_usage() returns the value including both.\n- * So we don't need to add the size of TidStoreControl separately.\n- */\n if (TidStoreIsShared(ts))\n- return sizeof(TidStore) +\nshared_rt_memory_usage(ts->tree.shared);\n+ rt_mem = shared_rt_memory_usage(ts->tree.shared);\n+ else\n+ rt_mem = local_rt_memory_usage(ts->tree.local);\n\n- return sizeof(TidStore) + sizeof(TidStore) +\nlocal_rt_memory_usage(ts->tree.local);\n+ return sizeof(TidStore) + sizeof(TidStoreControl) + rt_mem;\n\nUpthread, I meant that I don't see the need to include the size of\nthese structs *at all*. They're tiny, and the blocks/segments will\nalmost certainly have some empty space counted in the total anyway.\nThe returned size is already overestimated, so this extra code is just\na distraction.\n\n- if (result->num_offsets + bmw_popcount(w) > result->max_offset)\n+ if (result->num_offsets + (sizeof(bitmapword) * BITS_PER_BITMAPWORD)\n>= result->max_offset)\n\nI believe this math is wrong. 
We care about \"result->num_offsets +\nBITS_PER_BITMAPWORD\", right?\nAlso, it seems if the condition evaluates to equal, we still have\nenough space, in which case \">\" the max is the right condition.\n\n- if (off < 1 || off > MAX_TUPLES_PER_PAGE)\n+ if (off < 1 || off > MaxOffsetNumber)\n\nThis can now use OffsetNumberIsValid().\n\n> 0013 to 0015 patches are also updates from v47 patch.\n\n> I'm thinking that we should change the order of the patches so that\n> tidstore patch requires the patch for changing DSA segment sizes. That\n> way, we can remove the complex max memory calculation part that we no\n> longer use from the tidstore patch.\n\nI don't think there is any reason to have those calculations at all at\nthis point. Every patch in every version should at least *work\ncorrectly*, without kludging m_w_m and without constraining max\nsegment size. I'm fine with the latter remaining in its own thread,\nand I hope we can consider it an enhancement that respects the admin's\nconfigured limits more effectively, and not a pre-requisite for not\nbreaking. I *think* we're there now, but it's hard to tell since 0015\nwas at the very end. As I said recently, if something still fails, I'd\nlike to know why. So for v49, I took the liberty of removing the DSA\nmax segment patches for now, and squashing v48-0015.\n\nIn addition for v49, I have quite a few cleanups:\n\n0001 - This hasn't been touched in a very long time, but I ran\npgindent and clarified a comment\n0002 - We no longer need to isolate the rightmost bit anywhere, so\nremoved that part and revised the commit message accordingly.\n\nradix tree:\n0003 - v48 plus squashed v48-0013\n0004 - Removed or adjusted WIP, FIXME, TODO items. Some were outdated,\nand I fixed most of the rest.\n0005 - Remove the RT_PTR_LOCAL macro, since it's not really useful anymore.\n0006 - RT_FREE_LEAF only needs the allocated pointer, so pass that. 
A\nbit simpler.\n0007 - Uses the same idea from a previous cleanup of RT_SET, for RT_DELETE.\n0008 - Removes a holdover from the multi-value leaves era.\n0009 - It occurred to me that we need to have unique names for memory\ncontexts for different instantiations of the template. This is one way\nto do it, by using the configured RT_PREFIX in the context name. I\nalso took an extra step to make the size class fanout show up\ncorrectly on different platforms, but that's probably overkill and\nundesirable, and I'll probably use only the class name next time.\n0010/11 - Make the array functions less surprising and with more\ninformative names.\n0012 - Restore a useful technique from Andres's prototype. This part\nhas been slow for a long time, so much that it showed up in a profile\nwhere this path wasn't even taken much.\n\ntid store / vacuum:\n0013/14 - Same as v48 TID store, with review squashed\n0015 - Rationalize comment and starting value.\n0016 - I applied the removal of the old clamps from v48-0011 (init/max\nDSA), and left out the rest for now.\n0017-20 - Vacuum and debug tidstore as in v48, with v48-0015 squashed\n\nI'll bring up locking again shortly.", "msg_date": "Wed, 3 Jan 2024 21:10:11 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 3, 2024 at 9:10 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Jan 2, 2024 at 8:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I agree that we expose RT_LOCK_* functions and have tidstore use them,\n> > but am not sure the if (TidStoreIsShared(ts) LWLockAcquire(..., ...)\"\n> > calls part. 
I think that even if we expose them, we will still need to\n> > do something like \"if (TidStoreIsShared(ts))\n> > shared_rt_lock_share(ts->tree.shared)\", no?\n>\n> I'll come back to this topic separately.\n\nTo answer your question, sure, but that \"if (TidStoreIsShared(ts))\"\npart would be pushed down into a function so that only one place has\nto care about it.\n\nHowever, I'm starting to question whether we even need that. Meaning,\nlock the tidstore separately. To \"lock the tidstore\" means to take a\nlock, _separate_ from the radix tree's internal lock, to control\naccess to two fields in a separate \"control object\":\n\n+typedef struct TidStoreControl\n+{\n+ /* the number of tids in the store */\n+ int64 num_tids;\n+\n+ /* the maximum bytes a TidStore can use */\n+ size_t max_bytes;\n\nI'm pretty sure max_bytes does not need to be in shared memory, and\ncertainly not under a lock: Thinking of a hypothetical\nparallel-prune-phase scenario, one way would be for a leader process\nto pass out ranges of blocks to workers, and when the limit is\nexceeded, stop passing out blocks and wait for all the workers to\nfinish.\n\nAs for num_tids, vacuum previously put the similar count in\n\n@@ -176,7 +179,8 @@ struct ParallelVacuumState\n PVIndStats *indstats;\n\n /* Shared dead items space among parallel vacuum workers */\n- VacDeadItems *dead_items;\n+ TidStore *dead_items;\n\nVacDeadItems contained \"num_items\". What was the reason to have new\ninfrastructure for that count? And it doesn't seem like access to it\nwas controlled by a lock -- can you confirm? If we did get parallel\npruning, maybe the count would belong inside PVShared?\n\nThe number of tids is not that tightly bound to the tidstore's job. I\nbelieve tidbitmap.c (a possible future client) doesn't care about the\nglobal number of tids -- not only that, but AND/OR operations can\nchange the number in a non-obvious way, so it would not be convenient\nto keep an accurate number anyway. 
But the lock would still be\nmandatory with this patch.\n\nIf we can make vacuum work a bit closer to how it does now, it'd be a\nbig step up in readability, I think. Namely, getting rid of all the\nlocking logic inside tidstore.c and let the radix tree's locking do\nthe right thing. We'd need to make that work correctly when receiving\npointers to values upon lookup, and I already shared ideas for that.\nBut I want to see if there is any obstacle in the way of removing the\ntidstore control object and it's separate lock.\n\n\n", "msg_date": "Mon, 8 Jan 2024 18:35:22 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 3, 2024 at 11:10 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Jan 2, 2024 at 8:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I agree that we expose RT_LOCK_* functions and have tidstore use them,\n> > but am not sure the if (TidStoreIsShared(ts) LWLockAcquire(..., ...)\"\n> > calls part. I think that even if we expose them, we will still need to\n> > do something like \"if (TidStoreIsShared(ts))\n> > shared_rt_lock_share(ts->tree.shared)\", no?\n>\n> I'll come back to this topic separately.\n>\n> > I've attached a new patch set. From v47 patch, I've merged your\n> > changes for radix tree, and split the vacuum integration patch into 3\n> > patches: simply replaces VacDeadItems with TidsTore (0007 patch), and\n> > use a simple TID array for one-pass strategy (0008 patch), and replace\n> > has_lpdead_items with \"num_offsets > 0\" (0009 patch), while\n> > incorporating your review comments on the vacuum integration patch\n>\n> Nice!\n>\n> > (sorry for making it difficult to see the changes from v47 patch).\n>\n> It's actually pretty clear. 
I just have a couple comments before\n> sharing my latest cleanups:\n>\n> (diff'ing between v47 and v48):\n>\n> -- /*\n> - * In the shared case, TidStoreControl and radix_tree are backed by the\n> - * same DSA area and rt_memory_usage() returns the value including both.\n> - * So we don't need to add the size of TidStoreControl separately.\n> - */\n> if (TidStoreIsShared(ts))\n> - return sizeof(TidStore) +\n> shared_rt_memory_usage(ts->tree.shared);\n> + rt_mem = shared_rt_memory_usage(ts->tree.shared);\n> + else\n> + rt_mem = local_rt_memory_usage(ts->tree.local);\n>\n> - return sizeof(TidStore) + sizeof(TidStore) +\n> local_rt_memory_usage(ts->tree.local);\n> + return sizeof(TidStore) + sizeof(TidStoreControl) + rt_mem;\n>\n> Upthread, I meant that I don't see the need to include the size of\n> these structs *at all*. They're tiny, and the blocks/segments will\n> almost certainly have some empty space counted in the total anyway.\n> The returned size is already overestimated, so this extra code is just\n> a distraction.\n\nAgreed.\n\n>\n> - if (result->num_offsets + bmw_popcount(w) > result->max_offset)\n> + if (result->num_offsets + (sizeof(bitmapword) * BITS_PER_BITMAPWORD)\n> >= result->max_offset)\n>\n> I believe this math is wrong. We care about \"result->num_offsets +\n> BITS_PER_BITMAPWORD\", right?\n> Also, it seems if the condition evaluates to equal, we still have\n> enough space, in which case \">\" the max is the right condition.\n\nOops, you're right. Fixed.\n\n>\n> - if (off < 1 || off > MAX_TUPLES_PER_PAGE)\n> + if (off < 1 || off > MaxOffsetNumber)\n>\n> This can now use OffsetNumberIsValid().\n\nFixed.\n\n>\n> > 0013 to 0015 patches are also updates from v47 patch.\n>\n> > I'm thinking that we should change the order of the patches so that\n> > tidstore patch requires the patch for changing DSA segment sizes. 
That\n> > way, we can remove the complex max memory calculation part that we no\n> > longer use from the tidstore patch.\n>\n> I don't think there is any reason to have those calculations at all at\n> this point. Every patch in every version should at least *work\n> correctly*, without kludging m_w_m and without constraining max\n> segment size. I'm fine with the latter remaining in its own thread,\n> and I hope we can consider it an enhancement that respects the admin's\n> configured limits more effectively, and not a pre-requisite for not\n> breaking. I *think* we're there now, but it's hard to tell since 0015\n> was at the very end. As I said recently, if something still fails, I'd\n> like to know why. So for v49, I took the liberty of removing the DSA\n> max segment patches for now, and squashing v48-0015.\n\nFair enough.\n\n>\n> In addition for v49, I have quite a few cleanups:\n>\n> 0001 - This hasn't been touched in a very long time, but I ran\n> pgindent and clarified a comment\n> 0002 - We no longer need to isolate the rightmost bit anywhere, so\n> removed that part and revised the commit message accordingly.\n\nThanks.\n\n>\n> radix tree:\n> 0003 - v48 plus squashed v48-0013\n> 0004 - Removed or adjusted WIP, FIXME, TODO items. Some were outdated,\n> and I fixed most of the rest.\n> 0005 - Remove the RT_PTR_LOCAL macro, since it's not really useful anymore.\n> 0006 - RT_FREE_LEAF only needs the allocated pointer, so pass that. A\n> bit simpler.\n> 0007 - Uses the same idea from a previous cleanup of RT_SET, for RT_DELETE.\n> 0008 - Removes a holdover from the multi-value leaves era.\n> 0009 - It occurred to me that we need to have unique names for memory\n> contexts for different instantiations of the template. This is one way\n> to do it, by using the configured RT_PREFIX in the context name. 
I\n> also took an extra step to make the size class fanout show up\n> correctly on different platforms, but that's probably overkill and\n> undesirable, and I'll probably use only the class name next time.\n> 0010/11 - Make the array functions less surprising and with more\n> informative names.\n> 0012 - Restore a useful technique from Andres's prototype. This part\n> has been slow for a long time, so much that it showed up in a profile\n> where this path wasn't even taken much.\n\nThese changes look good to me. I've squashed them.\n\nIn addition, I've made some changes and cleanups:\n\n0010 - address the above review comments.\n0011 - simplify the radix tree iteration code. I hope it makes the\ncode clear and readable. Also I removed RT_UPDATE_ITER_STACK().\n0012 - fix a typo\n0013 - In RT_SHMEM case, we use SIZEOF_VOID_P for\nRT_VALUE_IS_EMBEDDABLE check, but I think it's not correct. Because\nDSA has its own pointer size, SIZEOF_DSA_POINTER, it could be 4 bytes\neven if SIZEOF_VOID_P is 8 bytes, for example in a case where\n!defined(PG_HAVE_ATOMIC_U64_SUPPORT). Please refer to dsa.h for\ndetails.\n0014 - cleanup RT_VERIFY code.\n0015 - change and cleanup RT_DUMP_NODE(). Now it dumps only one node\nand no longer supports dumping nodes recursively.\n0016 - remove RT_DUMP_SEARCH() and RT_DUMP(). These seem no longer necessary.\n0017 - MOve RT_DUMP_NODE to the debug function section, close to RT_STATS.\n0018 - Fix a printf format in RT_STATS().\n\nBTW, now that the inner and leaf nodes use the same structure, do we\nstill need RT_NODE_BASE_XXX types? 
Most places where we use\nRT_NODE_BASE_XXX types can be replaced with RT_NODE_XXX types.\nExceptions are RT_FANOUT_XX calculations:\n\n#if SIZEOF_VOID_P < 8\n#define RT_FANOUT_16_LO ((96 - sizeof(RT_NODE_BASE_16)) / sizeof(RT_PTR_ALLOC))\n#define RT_FANOUT_48 ((512 - sizeof(RT_NODE_BASE_48)) / sizeof(RT_PTR_ALLOC))\n#else\n#define RT_FANOUT_16_LO ((160 - sizeof(RT_NODE_BASE_16)) / sizeof(RT_PTR_ALLOC))\n#define RT_FANOUT_48 ((768 - sizeof(RT_NODE_BASE_48)) / sizeof(RT_PTR_ALLOC))\n#endif /* SIZEOF_VOID_P < 8 */\n\nBut I think we can replace them with offsetof(RT_NODE_16, children) etc.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 9 Jan 2024 11:40:03 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 9, 2024 at 9:40 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> In addition, I've made some changes and cleanups:\n\nThese look good to me, although I have not tried dumping a node in a while.\n\n> 0011 - simplify the radix tree iteration code. I hope it makes the\n> code clear and readable. Also I removed RT_UPDATE_ITER_STACK().\n\nI'm very pleased with how much simpler it is now!\n\n> 0013 - In RT_SHMEM case, we use SIZEOF_VOID_P for\n> RT_VALUE_IS_EMBEDDABLE check, but I think it's not correct. Because\n> DSA has its own pointer size, SIZEOF_DSA_POINTER, it could be 4 bytes\n> even if SIZEOF_VOID_P is 8 bytes, for example in a case where\n> !defined(PG_HAVE_ATOMIC_U64_SUPPORT). Please refer to dsa.h for\n> details.\n\nThanks for the pointer. ;-)\n\n> BTW, now that the inner and leaf nodes use the same structure, do we\n> still need RT_NODE_BASE_XXX types? Most places where we use\n> RT_NODE_BASE_XXX types can be replaced with RT_NODE_XXX types.\n\nThat's been in the back of my mind as well. Maybe the common header\nshould be the new \"base\" member? 
At least, something other than \"n\".\n\n> Exceptions are RT_FANOUT_XX calculations:\n>\n> #if SIZEOF_VOID_P < 8\n> #define RT_FANOUT_16_LO ((96 - sizeof(RT_NODE_BASE_16)) / sizeof(RT_PTR_ALLOC))\n> #define RT_FANOUT_48 ((512 - sizeof(RT_NODE_BASE_48)) / sizeof(RT_PTR_ALLOC))\n> #else\n> #define RT_FANOUT_16_LO ((160 - sizeof(RT_NODE_BASE_16)) / sizeof(RT_PTR_ALLOC))\n> #define RT_FANOUT_48 ((768 - sizeof(RT_NODE_BASE_48)) / sizeof(RT_PTR_ALLOC))\n> #endif /* SIZEOF_VOID_P < 8 */\n>\n> But I think we can replace them with offsetof(RT_NODE_16, children) etc.\n\nThat makes sense. Do you want to have a go at it, or shall I?\n\nI think after that, the only big cleanup needed is putting things in a\nmore readable order. I can do that at a later date, and other\nopportunities for beautification are pretty minor and localized.\n\nRationalizing locking is the only thing left that requires a bit of thought.\n\n\n", "msg_date": "Tue, 9 Jan 2024 18:19:46 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 9, 2024 at 8:19 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Jan 9, 2024 at 9:40 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > In addition, I've made some changes and cleanups:\n>\n> These look good to me, although I have not tried dumping a node in a while.\n>\n> > 0011 - simplify the radix tree iteration code. I hope it makes the\n> > code clear and readable. Also I removed RT_UPDATE_ITER_STACK().\n>\n> I'm very pleased with how much simpler it is now!\n>\n> > 0013 - In RT_SHMEM case, we use SIZEOF_VOID_P for\n> > RT_VALUE_IS_EMBEDDABLE check, but I think it's not correct. Because\n> > DSA has its own pointer size, SIZEOF_DSA_POINTER, it could be 4 bytes\n> > even if SIZEOF_VOID_P is 8 bytes, for example in a case where\n> > !defined(PG_HAVE_ATOMIC_U64_SUPPORT). 
Please refer to dsa.h for\n> > details.\n>\n> Thanks for the pointer. ;-)\n>\n> > BTW, now that the inner and leaf nodes use the same structure, do we\n> > still need RT_NODE_BASE_XXX types? Most places where we use\n> > RT_NODE_BASE_XXX types can be replaced with RT_NODE_XXX types.\n>\n> That's been in the back of my mind as well. Maybe the common header\n> should be the new \"base\" member? At least, something other than \"n\".\n\nAgreed.\n\n>\n> > Exceptions are RT_FANOUT_XX calculations:\n> >\n> > #if SIZEOF_VOID_P < 8\n> > #define RT_FANOUT_16_LO ((96 - sizeof(RT_NODE_BASE_16)) / sizeof(RT_PTR_ALLOC))\n> > #define RT_FANOUT_48 ((512 - sizeof(RT_NODE_BASE_48)) / sizeof(RT_PTR_ALLOC))\n> > #else\n> > #define RT_FANOUT_16_LO ((160 - sizeof(RT_NODE_BASE_16)) / sizeof(RT_PTR_ALLOC))\n> > #define RT_FANOUT_48 ((768 - sizeof(RT_NODE_BASE_48)) / sizeof(RT_PTR_ALLOC))\n> > #endif /* SIZEOF_VOID_P < 8 */\n> >\n> > But I think we can replace them with offsetof(RT_NODE_16, children) etc.\n>\n> That makes sense. Do you want to have a go at it, or shall I?\n\nI've done in 0010 patch in v51 patch set. Whereas RT_NODE_4 and\nRT_NODE_16 structs declaration needs RT_FANOUT_4_HI and\nRT_FANOUT_16_HI respectively, RT_FANOUT_16_LO and RT_FANOUT_48 need\nRT_NODE_16 and RT_NODE_48 structs declaration. So fanout declarations\nare now spread before and after RT_NODE_XXX struct declaration. It's a\nbit less readable, but I'm not sure of a better way.\n\nThe previous updates are merged into the main radix tree patch and\ntidstore patch. Nothing changes in other patches from v50.\n\n>\n> I think after that, the only big cleanup needed is putting things in a\n> more readable order. 
I can do that at a later date, and other\n> opportunities for beautification are pretty minor and localized.\n\nAgreed.\n\n>\n> Rationalizing locking is the only thing left that requires a bit of thought.\n\nRight, I'll send a reply soon.\n\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Wed, 10 Jan 2024 11:05:06 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 10, 2024 at 9:05 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I've done in 0010 patch in v51 patch set. Whereas RT_NODE_4 and\n> RT_NODE_16 structs declaration needs RT_FANOUT_4_HI and\n> RT_FANOUT_16_HI respectively, RT_FANOUT_16_LO and RT_FANOUT_48 need\n> RT_NODE_16 and RT_NODE_48 structs declaration. So fanout declarations\n> are now spread before and after RT_NODE_XXX struct declaration. It's a\n> bit less readable, but I'm not sure of a better way.\n\nThey were before and after the *_BASE types, so it's not really worse,\nI think. I did notice that RT_SLOT_IDX_LIMIT has been considered\nspecial for a very long time, before we even had size classes, so it's\nthe same thing but even more far away. I have an idea to introduce\n*_MAX macros, allowing to turn RT_SLOT_IDX_LIMIT into\nRT_FANOUT_48_MAX, so that everything is in the same spot, and to make\nthis area more consistent. I also noticed that I'd been assuming that\nRT_FANOUT_16_HI fits easily into a DSA size class, but that's only\ntrue on 64-bit, and in any case we don't want to assume it. 
I've\nattached an addendum .txt to demo this idea.", "msg_date": "Wed, 10 Jan 2024 13:40:40 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 8, 2024 at 8:35 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Jan 3, 2024 at 9:10 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Tue, Jan 2, 2024 at 8:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > I agree that we expose RT_LOCK_* functions and have tidstore use them,\n> > > but am not sure the if (TidStoreIsShared(ts) LWLockAcquire(..., ...)\"\n> > > calls part. I think that even if we expose them, we will still need to\n> > > do something like \"if (TidStoreIsShared(ts))\n> > > shared_rt_lock_share(ts->tree.shared)\", no?\n> >\n> > I'll come back to this topic separately.\n>\n> To answer your question, sure, but that \"if (TidStoreIsShared(ts))\"\n> part would be pushed down into a function so that only one place has\n> to care about it.\n>\n> However, I'm starting to question whether we even need that. Meaning,\n> lock the tidstore separately. To \"lock the tidstore\" means to take a\n> lock, _separate_ from the radix tree's internal lock, to control\n> access to two fields in a separate \"control object\":\n>\n> +typedef struct TidStoreControl\n> +{\n> + /* the number of tids in the store */\n> + int64 num_tids;\n> +\n> + /* the maximum bytes a TidStore can use */\n> + size_t max_bytes;\n>\n> I'm pretty sure max_bytes does not need to be in shared memory, and\n> certainly not under a lock: Thinking of a hypothetical\n> parallel-prune-phase scenario, one way would be for a leader process\n> to pass out ranges of blocks to workers, and when the limit is\n> exceeded, stop passing out blocks and wait for all the workers to\n> finish.\n\nTrue. 
I agreed that it doesn't need to be under a lock anyway, as it's\nread-only.\n\n>\n> As for num_tids, vacuum previously put the similar count in\n>\n> @@ -176,7 +179,8 @@ struct ParallelVacuumState\n> PVIndStats *indstats;\n>\n> /* Shared dead items space among parallel vacuum workers */\n> - VacDeadItems *dead_items;\n> + TidStore *dead_items;\n>\n> VacDeadItems contained \"num_items\". What was the reason to have new\n> infrastructure for that count? And it doesn't seem like access to it\n> was controlled by a lock -- can you confirm? If we did get parallel\n> pruning, maybe the count would belong inside PVShared?\n\nI thought that since the tidstore is a general-purpose data structure\nthe shared counter should be protected by a lock. One thing I'm\nconcerned about is that we might need to update both the radix tree\nand the counter atomically in some cases. But that's true we don't\nneed it for lazy vacuum at least for now. Even given the parallel scan\nphase, probably we won't need to have workers check the total number\nof stored tuples during a parallel scan.\n\n>\n> The number of tids is not that tightly bound to the tidstore's job. I\n> believe tidbitmap.c (a possible future client) doesn't care about the\n> global number of tids -- not only that, but AND/OR operations can\n> change the number in a non-obvious way, so it would not be convenient\n> to keep an accurate number anyway. But the lock would still be\n> mandatory with this patch.\n\nVery good point.\n\n>\n> If we can make vacuum work a bit closer to how it does now, it'd be a\n> big step up in readability, I think. Namely, getting rid of all the\n> locking logic inside tidstore.c and let the radix tree's locking do\n> the right thing. 
We'd need to make that work correctly when receiving\n> pointers to values upon lookup, and I already shared ideas for that.\n> But I want to see if there is any obstacle in the way of removing the\n> tidstore control object and it's separate lock.\n\nSo I agree to remove both max_bytes and num_items from the control\nobject.Also, as you mentioned, we can remove the tidstore control\nobject itself. TidStoreGetHandle() returns a radix tree handle, and we\ncan pass it to TidStoreAttach(). I'll try it.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 11 Jan 2024 09:28:44 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 11, 2024 at 9:28 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Jan 8, 2024 at 8:35 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Wed, Jan 3, 2024 at 9:10 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Tue, Jan 2, 2024 at 8:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > > I agree that we expose RT_LOCK_* functions and have tidstore use them,\n> > > > but am not sure the if (TidStoreIsShared(ts) LWLockAcquire(..., ...)\"\n> > > > calls part. I think that even if we expose them, we will still need to\n> > > > do something like \"if (TidStoreIsShared(ts))\n> > > > shared_rt_lock_share(ts->tree.shared)\", no?\n> > >\n> > > I'll come back to this topic separately.\n> >\n> > To answer your question, sure, but that \"if (TidStoreIsShared(ts))\"\n> > part would be pushed down into a function so that only one place has\n> > to care about it.\n> >\n> > However, I'm starting to question whether we even need that. Meaning,\n> > lock the tidstore separately. 
To \"lock the tidstore\" means to take a\n> > lock, _separate_ from the radix tree's internal lock, to control\n> > access to two fields in a separate \"control object\":\n> >\n> > +typedef struct TidStoreControl\n> > +{\n> > + /* the number of tids in the store */\n> > + int64 num_tids;\n> > +\n> > + /* the maximum bytes a TidStore can use */\n> > + size_t max_bytes;\n> >\n> > I'm pretty sure max_bytes does not need to be in shared memory, and\n> > certainly not under a lock: Thinking of a hypothetical\n> > parallel-prune-phase scenario, one way would be for a leader process\n> > to pass out ranges of blocks to workers, and when the limit is\n> > exceeded, stop passing out blocks and wait for all the workers to\n> > finish.\n>\n> True. I agreed that it doesn't need to be under a lock anyway, as it's\n> read-only.\n>\n> >\n> > As for num_tids, vacuum previously put the similar count in\n> >\n> > @@ -176,7 +179,8 @@ struct ParallelVacuumState\n> > PVIndStats *indstats;\n> >\n> > /* Shared dead items space among parallel vacuum workers */\n> > - VacDeadItems *dead_items;\n> > + TidStore *dead_items;\n> >\n> > VacDeadItems contained \"num_items\". What was the reason to have new\n> > infrastructure for that count? And it doesn't seem like access to it\n> > was controlled by a lock -- can you confirm? If we did get parallel\n> > pruning, maybe the count would belong inside PVShared?\n>\n> I thought that since the tidstore is a general-purpose data structure\n> the shared counter should be protected by a lock. One thing I'm\n> concerned about is that we might need to update both the radix tree\n> and the counter atomically in some cases. But that's true we don't\n> need it for lazy vacuum at least for now. Even given the parallel scan\n> phase, probably we won't need to have workers check the total number\n> of stored tuples during a parallel scan.\n>\n> >\n> > The number of tids is not that tightly bound to the tidstore's job. 
I\n> > believe tidbitmap.c (a possible future client) doesn't care about the\n> > global number of tids -- not only that, but AND/OR operations can\n> > change the number in a non-obvious way, so it would not be convenient\n> > to keep an accurate number anyway. But the lock would still be\n> > mandatory with this patch.\n>\n> Very good point.\n>\n> >\n> > If we can make vacuum work a bit closer to how it does now, it'd be a\n> > big step up in readability, I think. Namely, getting rid of all the\n> > locking logic inside tidstore.c and let the radix tree's locking do\n> > the right thing. We'd need to make that work correctly when receiving\n> > pointers to values upon lookup, and I already shared ideas for that.\n> > But I want to see if there is any obstacle in the way of removing the\n> > tidstore control object and it's separate lock.\n>\n> So I agree to remove both max_bytes and num_items from the control\n> object.Also, as you mentioned, we can remove the tidstore control\n> object itself. TidStoreGetHandle() returns a radix tree handle, and we\n> can pass it to TidStoreAttach(). I'll try it.\n>\n\nI realized that if we remove the whole tidstore control object\nincluding max_bytes, processes who attached the shared tidstore cannot\nuse TidStoreIsFull() actually as it always returns true. Also they\ncannot use TidStoreReset() as well since it needs to pass max_bytes to\nRT_CREATE(). It might not be a problem in terms of lazy vacuum, but it\ncould be problematic for general use. If we remove it, we probably\nneed a safeguard to prevent those who attached the tidstore from\ncalling these functions. 
Or we can keep the control object but remove\nthe lock and num_tids.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 12 Jan 2024 17:49:04 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jan 12, 2024 at 3:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Jan 11, 2024 at 9:28 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > So I agree to remove both max_bytes and num_items from the control\n> > object.Also, as you mentioned, we can remove the tidstore control\n> > object itself. TidStoreGetHandle() returns a radix tree handle, and we\n> > can pass it to TidStoreAttach(). I'll try it.\n\nThanks. It's worth looking closely here.\n\n> I realized that if we remove the whole tidstore control object\n> including max_bytes, processes who attached the shared tidstore cannot\n> use TidStoreIsFull() actually as it always returns true.\n\nI imagine that we'd replace that with a function (maybe an earlier\nversion had it?) to report the memory usage to the caller, which\nshould know where to find max_bytes.\n\n> Also they\n> cannot use TidStoreReset() as well since it needs to pass max_bytes to\n> RT_CREATE(). It might not be a problem in terms of lazy vacuum, but it\n> could be problematic for general use.\n\nHEAD has no problem finding the necessary values, and I don't think\nit'd be difficult to maintain that ability. I'm not actually sure what\n\"general use\" needs to have, and I'm not sure anyone can guess.\nThere's the future possibility of parallel heap-scanning, but I'm\nguessing a *lot* more needs to happen for that to work, so I'm not\nsure how much it buys us to immediately start putting those two fields\nin a special abstraction. 
The only other concrete use case mentioned\nin this thread that I remember is bitmap heap scan, and I believe that\nwould never need to reset, only free the whole thing when finished.\n\nI spent some more time studying parallel vacuum, and have some\nthoughts. In HEAD, we have\n\n-/*\n- * VacDeadItems stores TIDs whose index tuples are deleted by index vacuuming.\n- */\n-typedef struct VacDeadItems\n-{\n- int max_items; /* # slots allocated in array */\n- int num_items; /* current # of entries */\n-\n- /* Sorted array of TIDs to delete from indexes */\n- ItemPointerData items[FLEXIBLE_ARRAY_MEMBER];\n-} VacDeadItems;\n\n...which has the tids, plus two fields that function _very similarly_\nto the two extra fields in the tidstore control object. It's a bit\nstrange to me that the patch doesn't have this struct anymore.\n\nI suspect if we keep it around (just change \"items\" to be the local\ntidstore struct), the patch would have a bit less churn and look/work\nmore like the current code. I think it might be easier to read if the\nv17 commits are suited to the current needs of vacuum, rather than try\nto anticipate all uses. Richer abstractions can come later if needed.\nAnother stanza:\n\n- /* Prepare the dead_items space */\n- dead_items = (VacDeadItems *) shm_toc_allocate(pcxt->toc,\n- est_dead_items_len);\n- dead_items->max_items = max_items;\n- dead_items->num_items = 0;\n- MemSet(dead_items->items, 0, sizeof(ItemPointerData) * max_items);\n- shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_ITEMS, dead_items);\n- pvs->dead_items = dead_items;\n\nWith s/max_items/max_bytes/, I wonder if we can still use some of\nthis, and parallel workers would have no problem getting the necessary\ninfo, as they do today. If not, I don't really understand why. 
I'm not\nvery familiar with working with shared memory, and I know the tree\nitself needs some different setup, so it's quite possible I'm missing\nsomething.\n\nI find it difficult to kept straight these four things:\n\n- radix tree\n- radix tree control object\n- tidstore\n- tidstore control object\n\nEven with the code in front of me, it's hard to reason about how these\nconcepts fit together. It'd be much more readable if this was\nsimplified.\n\n\n", "msg_date": "Sun, 14 Jan 2024 20:42:49 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sun, Jan 14, 2024 at 10:43 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Jan 12, 2024 at 3:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Jan 11, 2024 at 9:28 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > So I agree to remove both max_bytes and num_items from the control\n> > > object.Also, as you mentioned, we can remove the tidstore control\n> > > object itself. TidStoreGetHandle() returns a radix tree handle, and we\n> > > can pass it to TidStoreAttach(). I'll try it.\n>\n> Thanks. It's worth looking closely here.\n>\n> > I realized that if we remove the whole tidstore control object\n> > including max_bytes, processes who attached the shared tidstore cannot\n> > use TidStoreIsFull() actually as it always returns true.\n>\n> I imagine that we'd replace that with a function (maybe an earlier\n> version had it?) to report the memory usage to the caller, which\n> should know where to find max_bytes.\n>\n> > Also they\n> > cannot use TidStoreReset() as well since it needs to pass max_bytes to\n> > RT_CREATE(). It might not be a problem in terms of lazy vacuum, but it\n> > could be problematic for general use.\n>\n> HEAD has no problem finding the necessary values, and I don't think\n> it'd be difficult to maintain that ability. 
I'm not actually sure what\n> \"general use\" needs to have, and I'm not sure anyone can guess.\n> There's the future possibility of parallel heap-scanning, but I'm\n> guessing a *lot* more needs to happen for that to work, so I'm not\n> sure how much it buys us to immediately start putting those two fields\n> in a special abstraction. The only other concrete use case mentioned\n> in this thread that I remember is bitmap heap scan, and I believe that\n> would never need to reset, only free the whole thing when finished.\n>\n> I spent some more time studying parallel vacuum, and have some\n> thoughts. In HEAD, we have\n>\n> -/*\n> - * VacDeadItems stores TIDs whose index tuples are deleted by index vacuuming.\n> - */\n> -typedef struct VacDeadItems\n> -{\n> - int max_items; /* # slots allocated in array */\n> - int num_items; /* current # of entries */\n> -\n> - /* Sorted array of TIDs to delete from indexes */\n> - ItemPointerData items[FLEXIBLE_ARRAY_MEMBER];\n> -} VacDeadItems;\n>\n> ...which has the tids, plus two fields that function _very similarly_\n> to the two extra fields in the tidstore control object. It's a bit\n> strange to me that the patch doesn't have this struct anymore.\n>\n> I suspect if we keep it around (just change \"items\" to be the local\n> tidstore struct), the patch would have a bit less churn and look/work\n> more like the current code. I think it might be easier to read if the\n> v17 commits are suited to the current needs of vacuum, rather than try\n> to anticipate all uses. Richer abstractions can come later if needed.\n\nJust changing \"items\" to be the local tidstore struct could make the\ncode tricky a bit, since max_bytes and num_items are on the shared\nmemory while \"items\" is a local pointer to the shared tidstore. This\nis a reason why I abstract them behind TidStore. However, IIUC the\ncurrent parallel vacuum can work with such VacDeadItems fields,\nfortunately. 
The leader process can use VacDeadItems allocated on DSM,\nand worker processes can use a local VacDeadItems of which max_bytes\nand num_items are copied from the shared one and \"items\" is a local\npointer.\n\nAssuming parallel heap scan requires for both the leader and workers\nto update the shared VacDeadItems concurrently, we may need such\nricher abstractions.\n\nI've implemented this idea in the v52 patch set. Here is the summary\nof the updates:\n\n0008: Remove the control object from tidstore. Also removed some\nunsupported functions such as TidStoreNumTids()\n0009: Adjust lazy vacuum integration patch with the control object removal.\n\nI've not updated any locking code yet. Once we confirm this direction,\nI'll update the locking code too.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 16 Jan 2024 15:17:35 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 16, 2024 at 1:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> Just changing \"items\" to be the local tidstore struct could make the\n> code tricky a bit, since max_bytes and num_items are on the shared\n> memory while \"items\" is a local pointer to the shared tidstore.\n\nThanks for trying it this way! I like the overall simplification but\nthis aspect is not great.\nHmm, I wonder if that's a side-effect of the \"create\" functions doing\ntheir own allocations and returning a pointer. Would it be less tricky\nif the structs were declared where we need them and passed to \"init\"\nfunctions?\n\nThat may be a good idea for other reasons. 
It's awkward that the\ncreate function is declared like this:\n\n#ifdef RT_SHMEM\nRT_SCOPE RT_RADIX_TREE *RT_CREATE(MemoryContext ctx, Size max_bytes,\ndsa_area *dsa,\nint tranche_id);\n#else\nRT_SCOPE RT_RADIX_TREE *RT_CREATE(MemoryContext ctx, Size max_bytes);\n#endif\n\nAn init function wouldn't need these parameters: it could look at the\npassed struct to know what to do.\n\n\n", "msg_date": "Wed, 17 Jan 2024 07:20:21 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 17, 2024 at 9:20 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Jan 16, 2024 at 1:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > Just changing \"items\" to be the local tidstore struct could make the\n> > code tricky a bit, since max_bytes and num_items are on the shared\n> > memory while \"items\" is a local pointer to the shared tidstore.\n>\n> Thanks for trying it this way! I like the overall simplification but\n> this aspect is not great.\n> Hmm, I wonder if that's a side-effect of the \"create\" functions doing\n> their own allocations and returning a pointer. Would it be less tricky\n> if the structs were declared where we need them and passed to \"init\"\n> functions?\n\nSeems worth trying. The current RT_CREATE() API is also convenient as\nother data structure such as simplehash.h and dshash.c supports a\nsimilar\n\n>\n> That may be a good idea for other reasons. It's awkward that the\n> create function is declared like this:\n>\n> #ifdef RT_SHMEM\n> RT_SCOPE RT_RADIX_TREE *RT_CREATE(MemoryContext ctx, Size max_bytes,\n> dsa_area *dsa,\n> int tranche_id);\n> #else\n> RT_SCOPE RT_RADIX_TREE *RT_CREATE(MemoryContext ctx, Size max_bytes);\n> #endif\n>\n> An init function wouldn't need these parameters: it could look at the\n> passed struct to know what to do.\n\nBut the init function would initialize leaf_ctx etc,no? 
Initializing\nleaf_ctx needs max_bytes that is not stored in RT_RADIX_TREE. The same\nis true for dsa. I imagined that an init function would allocate a DSA\nmemory for the control object. So I imagine we will end up still\nrequiring some of them.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 17 Jan 2024 10:38:47 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 17, 2024 at 8:39 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Wed, Jan 17, 2024 at 9:20 AM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Tue, Jan 16, 2024 at 1:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > Just changing \"items\" to be the local tidstore struct could make the\n> > > code tricky a bit, since max_bytes and num_items are on the shared\n> > > memory while \"items\" is a local pointer to the shared tidstore.\n> >\n> > Thanks for trying it this way! I like the overall simplification but\n> > this aspect is not great.\n> > Hmm, I wonder if that's a side-effect of the \"create\" functions doing\n> > their own allocations and returning a pointer. Would it be less tricky\n> > if the structs were declared where we need them and passed to \"init\"\n> > functions?\n>\n> Seems worth trying. The current RT_CREATE() API is also convenient as\n> other data structure such as simplehash.h and dshash.c supports a\n> similar\n\nI don't happen to know if these paths had to solve similar trickiness\nwith some values being local, and some shared.\n\n> > That may be a good idea for other reasons. 
It's awkward that the\n> > create function is declared like this:\n> >\n> > #ifdef RT_SHMEM\n> > RT_SCOPE RT_RADIX_TREE *RT_CREATE(MemoryContext ctx, Size max_bytes,\n> > dsa_area *dsa,\n> > int tranche_id);\n> > #else\n> > RT_SCOPE RT_RADIX_TREE *RT_CREATE(MemoryContext ctx, Size max_bytes);\n> > #endif\n> >\n> > An init function wouldn't need these parameters: it could look at the\n> > passed struct to know what to do.\n>\n> But the init function would initialize leaf_ctx etc,no? Initializing\n> leaf_ctx needs max_bytes that is not stored in RT_RADIX_TREE.\n\nI was more referring to the parameters that were different above\ndepending on shared memory. My first thought was that the tricky part\nis because of the allocation in local memory, but it's certainly\npossible I've misunderstood the problem.\n\n> The same\n> is true for dsa. I imagined that an init function would allocate a DSA\n> memory for the control object.\n\nYes:\n\n...\n// embedded in VacDeadItems\n TidStore items;\n};\n\n// NULL DSA in local case, etc\ndead_items->items.area = dead_items_dsa;\ndead_items->items.tranche_id = FOO_ID;\n\nTidStoreInit(&dead_items->items, vac_work_mem);\n\nThat's how I imagined it would work (leaving out some details). I\nhaven't tried it, so not sure how much it helps. Maybe it has other\nproblems, but I'm hoping it's just a matter of programming.\n\nIf we can't make this work nicely, I'd be okay with keeping the tid\nstore control object. My biggest concern is unnecessary\ndouble-locking.\n\n\n", "msg_date": "Wed, 17 Jan 2024 09:37:18 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I wrote:\n\n> > Hmm, I wonder if that's a side-effect of the \"create\" functions doing\n> > their own allocations and returning a pointer. 
Would it be less tricky\n> > if the structs were declared where we need them and passed to \"init\"\n> > functions?\n\nIf this is a possibility, I thought I'd first send the last (I hope)\nlarge-ish set of radix tree cleanups to avoid rebasing issues. I'm not\nincluding tidstore/vacuum here, because recent discussion has some\nup-in-the-air work.\n\nShould be self-explanatory, but some thing are worth calling out:\n0012 and 0013: Some time ago I started passing insertpos as a\nparameter, but now see that is not ideal -- when growing from node16\nto node48 we don't need it at all, so it's a wasted calculation. While\nreverting that, I found that this also allows passing constants in\nsome cases.\n0014 makes a cleaner separation between adding a child and growing a\nnode, resulting in more compact-looking functions.\n0019 is a bit unpolished, but I realized that it's pointless to assign\na zero child when further up the call stack we overwrite it anyway\nwith the actual value. With this, that assignment is skipped. This\nmakes some comments and names strange, so needs a bit of polish, but\nwanted to get it out there anyway.", "msg_date": "Wed, 17 Jan 2024 10:32:25 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 17, 2024 at 11:37 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Jan 17, 2024 at 8:39 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Jan 17, 2024 at 9:20 AM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Tue, Jan 16, 2024 at 1:18 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > > Just changing \"items\" to be the local tidstore struct could make the\n> > > > code tricky a bit, since max_bytes and num_items are on the shared\n> > > > memory while \"items\" is a local pointer to the shared tidstore.\n> > >\n> > > Thanks for trying it this way! 
I like the overall simplification but\n> > > this aspect is not great.\n> > > Hmm, I wonder if that's a side-effect of the \"create\" functions doing\n> > > their own allocations and returning a pointer. Would it be less tricky\n> > > if the structs were declared where we need them and passed to \"init\"\n> > > functions?\n> >\n> > Seems worth trying. The current RT_CREATE() API is also convenient as\n> > other data structure such as simplehash.h and dshash.c supports a\n> > similar\n>\n> I don't happen to know if these paths had to solve similar trickiness\n> with some values being local, and some shared.\n>\n> > > That may be a good idea for other reasons. It's awkward that the\n> > > create function is declared like this:\n> > >\n> > > #ifdef RT_SHMEM\n> > > RT_SCOPE RT_RADIX_TREE *RT_CREATE(MemoryContext ctx, Size max_bytes,\n> > > dsa_area *dsa,\n> > > int tranche_id);\n> > > #else\n> > > RT_SCOPE RT_RADIX_TREE *RT_CREATE(MemoryContext ctx, Size max_bytes);\n> > > #endif\n> > >\n> > > An init function wouldn't need these parameters: it could look at the\n> > > passed struct to know what to do.\n> >\n> > But the init function would initialize leaf_ctx etc,no? Initializing\n> > leaf_ctx needs max_bytes that is not stored in RT_RADIX_TREE.\n>\n> I was more referring to the parameters that were different above\n> depending on shared memory. My first thought was that the tricky part\n> is because of the allocation in local memory, but it's certainly\n> possible I've misunderstood the problem.\n>\n> > The same\n> > is true for dsa. I imagined that an init function would allocate a DSA\n> > memory for the control object.\n>\n> Yes:\n>\n> ...\n> // embedded in VacDeadItems\n> TidStore items;\n> };\n>\n> // NULL DSA in local case, etc\n> dead_items->items.area = dead_items_dsa;\n> dead_items->items.tranche_id = FOO_ID;\n>\n> TidStoreInit(&dead_items->items, vac_work_mem);\n>\n> That's how I imagined it would work (leaving out some details). 
I\n> haven't tried it, so not sure how much it helps. Maybe it has other\n> problems, but I'm hoping it's just a matter of programming.\n\nIt seems we cannot make this work nicely. IIUC VacDeadItems is\nallocated in DSM and TidStore is embedded there. However,\ndead_items->items.area is a local pointer to dsa_area. So we cannot\ninclude dsa_area in neither TidStore nor RT_RADIX_TREE. Instead we\nwould need to pass dsa_area to each interface by callers.\n\n>\n> If we can't make this work nicely, I'd be okay with keeping the tid\n> store control object. My biggest concern is unnecessary\n> double-locking.\n\nIf we don't do any locking stuff in radix tree APIs and it's the\nuser's responsibility at all, probably we don't need a lock for\ntidstore? That is, we expose lock functions as you mentioned and the\nuser (like tidstore) acquires/releases the lock before/after accessing\nthe radix tree and num_items. Currently (as of v52 patch) RT_FIND is\ndoing so, but we would need to change RT_SET() and iteration functions\nas well.\n\nDuring trying this idea, I realized that there is a visibility problem\nin the radix tree template especially if we want to embed the radix\ntree in a struct. Considering a use case where we want to use a radix\ntree in an exposed struct, we would declare only interfaces in a .h\nfile and define actual implementation in a .c file (FYI\nTupleHashTableData does a similar thing with simplehash.h). 
The .c\nfile and .h file would be like:\n\nin .h file:\n#define RT_PREFIX local_rt\n#define RT_SCOPE extern\n#define RT_DECLARE\n#define RT_VALUE_TYPE BlocktableEntry\n#define RT_VARLEN_VALUE\n#include \"lib/radixtree.h\"\n\ntypedef struct TidStore\n{\n:\n local_rt_radix_tree tree; /* embedded */\n:\n} TidStore;\n\nin .c file:\n\n#define RT_PREFIX local_rt\n#define RT_SCOPE extern\n#define RT_DEFINE\n#define RT_VALUE_TYPE BlocktableEntry\n#define RT_VARLEN_VALUE\n#include \"lib/radixtree.h\"\n\nBut it doesn't work as the compiler doesn't know the actual definition\nof local_rt_radix_tree. If the 'tree' is *local_rt_radix_tree, it\nworks. The reason is that with RT_DECLARE but without RT_DEFINE, the\nradix tree template generates only forward declarations:\n\n#ifdef RT_DECLARE\n\ntypedef struct RT_RADIX_TREE RT_RADIX_TREE;\ntypedef struct RT_ITER RT_ITER;\n\nIn order to make it work, we need to move the definitions required to\nexpose RT_RADIX_TREE struct to RT_DECLARE part, which actually\nrequires to move RT_NODE, RT_HANDLE, RT_NODE_PTR, RT_SIZE_CLASS_COUNT,\nand RT_RADIX_TREE_CONTROL etc. However RT_SIZE_CLASS_COUNT, used in\nRT_RADIX_TREE, could be bothersome. Since it refers to\nRT_SIZE_CLASS_INFO that further refers to many #defines and structs,\nwe might end up moving many structs such as RT_NODE_4 etc to\nRT_DECLARE part as well. Or we can use a fixed number is stead of\n\"lengthof(RT_SIZE_CLASS_INFO)\". 
Apart from that, macros requried by\nboth RT_DECLARE and RT_DEFINE such as RT_PAN and RT_MAX_LEVEL also\nneeds to be moved to a common place where they are defined in both\ncases.\n\nGiven these facts, I think that the current abstraction works nicely\nand it would make sense not to support embedding the radix tree.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 18 Jan 2024 10:30:54 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 18, 2024 at 8:31 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> It seems we cannot make this work nicely. IIUC VacDeadItems is\n> allocated in DSM and TidStore is embedded there. However,\n> dead_items->items.area is a local pointer to dsa_area. So we cannot\n> include dsa_area in neither TidStore nor RT_RADIX_TREE. Instead we\n> would need to pass dsa_area to each interface by callers.\n\nThanks again for exploring this line of thinking! Okay, it seems even\nif there's a way to make this work, it would be too invasive to\njustify when compared with the advantage I was hoping for.\n\n> > If we can't make this work nicely, I'd be okay with keeping the tid\n> > store control object. My biggest concern is unnecessary\n> > double-locking.\n>\n> If we don't do any locking stuff in radix tree APIs and it's the\n> user's responsibility at all, probably we don't need a lock for\n> tidstore? That is, we expose lock functions as you mentioned and the\n> user (like tidstore) acquires/releases the lock before/after accessing\n> the radix tree and num_items.\n\nI'm not quite sure what the point of \"num_items\" is anymore, because\nit was really tied to the array in VacDeadItems. dead_items->num_items\nis essential to reading/writing the array correctly. If this number is\nwrong, the array is corrupt. There is no such requirement for the\nradix tree. 
We don't need to know the number of tids to add to it or\ndo a lookup, or anything.\n\nThere are a number of places where we assert \"the running count of the\ndead items\" is the same as \"the length of the dead items array\", like\nhere:\n\n@@ -2214,7 +2205,7 @@ lazy_vacuum(LVRelState *vacrel)\n BlockNumber threshold;\n\n Assert(vacrel->num_index_scans == 0);\n- Assert(vacrel->lpdead_items == vacrel->dead_items->num_items);\n+ Assert(vacrel->lpdead_items == TidStoreNumTids(vacrel->dead_items));\n\nAs such, in HEAD I'm guessing it's arbitrary which one is used for\ncontrol flow. Correct me if I'm mistaken. If I am wrong for some part\nof the code, it'd be good to understand when that invariant can't be\nmaintained.\n\n@@ -1258,7 +1265,7 @@ lazy_scan_heap(LVRelState *vacrel)\n * Do index vacuuming (call each index's ambulkdelete routine), then do\n * related heap vacuuming\n */\n- if (dead_items->num_items > 0)\n+ if (TidStoreNumTids(dead_items) > 0)\n lazy_vacuum(vacrel);\n\nLike here. In HEAD, could this have used vacrel->dead_items?\n\n@@ -2479,14 +2473,14 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)\n * We set all LP_DEAD items from the first heap pass to LP_UNUSED during\n * the second heap pass. 
No more, no less.\n */\n- Assert(index > 0);\n Assert(vacrel->num_index_scans > 1 ||\n- (index == vacrel->lpdead_items &&\n+ (TidStoreNumTids(vacrel->dead_items) == vacrel->lpdead_items &&\n vacuumed_pages == vacrel->lpdead_item_pages));\n\n ereport(DEBUG2,\n- (errmsg(\"table \\\"%s\\\": removed %lld dead item identifiers in %u pages\",\n- vacrel->relname, (long long) index, vacuumed_pages)));\n+ (errmsg(\"table \\\"%s\\\": removed \" INT64_FORMAT \"dead item identifiers\nin %u pages\",\n+ vacrel->relname, TidStoreNumTids(vacrel->dead_items),\n+ vacuumed_pages)));\n\nWe assert that vacrel->lpdead_items has the expected value, and then\nthe ereport repeats the function call (with a lock) to read the value\nwe just consulted to pass the assert.\n\nIf we *really* want to compare counts, maybe we could invent a\ndebugging-only function that iterates over the tree and popcounts the\nbitmaps. That seems too expensive for regular assert builds, though.\n\nOn the subject of debugging builds, I think it no longer makes sense\nto have the array for debug checking in tid store, even during\ndevelopment. A few months ago, we had an encoding scheme that looked\nsimple on paper, but its code was fiendishly difficult to follow (at\nleast for me). That's gone. In addition to the debugging count above,\nwe could also put a copy of the key in the BlockTableEntry's header,\nin debug builds. We don't yet need to care about the key size, since\nwe don't (yet) have runtime-embeddable values.\n\n> Currently (as of v52 patch) RT_FIND is\n> doing so,\n\n[meaning, there is no internal \"automatic\" locking here since after we\nswitched to variable-length types, an outstanding TODO]\nMaybe it's okay to expose global locking for v17. I have one possible\nalternative:\n\nThis week I tried an idea to use a callback there so that after\ninternal unlocking, the caller received the value (or whatever else\nneeds to happen, such as lookup an offset in the tid bitmap). 
I've\nattached a draft for that that passes radix tree tests. It's a bit\nawkward, but I'm guessing this would more closely match future\ninternal atomic locking. Let me know what you think of the concept,\nand then do whichever way you think is best. (using v53 as the basis)\n\nI believe this is the only open question remaining. The rest is just\npolish and testing.\n\n> During trying this idea, I realized that there is a visibility problem\n> in the radix tree template\n\nIf it's broken even without the embedding I'll look into this (I don't\nknow if this configuration has ever been tested). I think a good test\nis putting the shared tid tree in it's own translation unit, to see if\nanything needs to be fixed. I'll go try that.", "msg_date": "Thu, 18 Jan 2024 11:30:01 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Jan 18, 2024 at 1:30 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Jan 18, 2024 at 8:31 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > It seems we cannot make this work nicely. IIUC VacDeadItems is\n> > allocated in DSM and TidStore is embedded there. However,\n> > dead_items->items.area is a local pointer to dsa_area. So we cannot\n> > include dsa_area in neither TidStore nor RT_RADIX_TREE. Instead we\n> > would need to pass dsa_area to each interface by callers.\n>\n> Thanks again for exploring this line of thinking! Okay, it seems even\n> if there's a way to make this work, it would be too invasive to\n> justify when compared with the advantage I was hoping for.\n>\n> > > If we can't make this work nicely, I'd be okay with keeping the tid\n> > > store control object. My biggest concern is unnecessary\n> > > double-locking.\n> >\n> > If we don't do any locking stuff in radix tree APIs and it's the\n> > user's responsibility at all, probably we don't need a lock for\n> > tidstore? 
That is, we expose lock functions as you mentioned and the\n> > user (like tidstore) acquires/releases the lock before/after accessing\n> > the radix tree and num_items.\n>\n> I'm not quite sure what the point of \"num_items\" is anymore, because\n> it was really tied to the array in VacDeadItems. dead_items->num_items\n> is essential to reading/writing the array correctly. If this number is\n> wrong, the array is corrupt. There is no such requirement for the\n> radix tree. We don't need to know the number of tids to add to it or\n> do a lookup, or anything.\n\nTrue. Sorry I wanted to say \"num_tids\" of TidStore. I'm still thinking\nwe need to have the number of TIDs in a tidstore, especially in the\ntidstore's control object.\n\n>\n> There are a number of places where we assert \"the running count of the\n> dead items\" is the same as \"the length of the dead items array\", like\n> here:\n>\n> @@ -2214,7 +2205,7 @@ lazy_vacuum(LVRelState *vacrel)\n> BlockNumber threshold;\n>\n> Assert(vacrel->num_index_scans == 0);\n> - Assert(vacrel->lpdead_items == vacrel->dead_items->num_items);\n> + Assert(vacrel->lpdead_items == TidStoreNumTids(vacrel->dead_items));\n>\n> As such, in HEAD I'm guessing it's arbitrary which one is used for\n> control flow. Correct me if I'm mistaken. If I am wrong for some part\n> of the code, it'd be good to understand when that invariant can't be\n> maintained.\n>\n> @@ -1258,7 +1265,7 @@ lazy_scan_heap(LVRelState *vacrel)\n> * Do index vacuuming (call each index's ambulkdelete routine), then do\n> * related heap vacuuming\n> */\n> - if (dead_items->num_items > 0)\n> + if (TidStoreNumTids(dead_items) > 0)\n> lazy_vacuum(vacrel);\n>\n> Like here. In HEAD, could this have used vacrel->dead_items?\n>\n> @@ -2479,14 +2473,14 @@ lazy_vacuum_heap_rel(LVRelState *vacrel)\n> * We set all LP_DEAD items from the first heap pass to LP_UNUSED during\n> * the second heap pass. 
No more, no less.\n> */\n> - Assert(index > 0);\n> Assert(vacrel->num_index_scans > 1 ||\n> - (index == vacrel->lpdead_items &&\n> + (TidStoreNumTids(vacrel->dead_items) == vacrel->lpdead_items &&\n> vacuumed_pages == vacrel->lpdead_item_pages));\n>\n> ereport(DEBUG2,\n> - (errmsg(\"table \\\"%s\\\": removed %lld dead item identifiers in %u pages\",\n> - vacrel->relname, (long long) index, vacuumed_pages)));\n> + (errmsg(\"table \\\"%s\\\": removed \" INT64_FORMAT \"dead item identifiers\n> in %u pages\",\n> + vacrel->relname, TidStoreNumTids(vacrel->dead_items),\n> + vacuumed_pages)));\n>\n> We assert that vacrel->lpdead_items has the expected value, and then\n> the ereport repeats the function call (with a lock) to read the value\n> we just consulted to pass the assert.\n>\n> If we *really* want to compare counts, maybe we could invent a\n> debugging-only function that iterates over the tree and popcounts the\n> bitmaps. That seems too expensive for regular assert builds, though.\n\nIIUC lpdead_items is the total number of LP_DEAD items vacuumed during\nthe whole lazy vacuum operation whereas num_items is the number of\nLP_DEAD items vacuumed within one index vacuum and heap vacuum cycle.\nThat is, after heap vacuum, the latter counter is reset while the\nformer counter is not.\n\nThe latter counter is used in lazyvacuum.c as well as the ereport in\nvac_bulkdel_one_index().\n\n>\n> On the subject of debugging builds, I think it no longer makes sense\n> to have the array for debug checking in tid store, even during\n> development. A few months ago, we had an encoding scheme that looked\n> simple on paper, but its code was fiendishly difficult to follow (at\n> least for me). That's gone. In addition to the debugging count above,\n> we could also put a copy of the key in the BlockTableEntry's header,\n> in debug builds. 
We don't yet need to care about the key size, since\n> we don't (yet) have runtime-embeddable values.\n\nPutting a copy of the key in BlocktableEntry's header is an\ninteresting idea. But the current debug code in the tidstore also\nmakes sure that the tidstore returns TIDs in the correct order during\nan iterate operation. I think it still has a value and you can disable\nit by removing the \"#define TIDSTORE_DEBUG\" line.\n\n>\n> > Currently (as of v52 patch) RT_FIND is\n> > doing so,\n>\n> [meaning, there is no internal \"automatic\" locking here since after we\n> switched to variable-length types, an outstanding TODO]\n> Maybe it's okay to expose global locking for v17. I have one possible\n> alternative:\n>\n> This week I tried an idea to use a callback there so that after\n> internal unlocking, the caller received the value (or whatever else\n> needs to happen, such as lookup an offset in the tid bitmap). I've\n> attached a draft for that that passes radix tree tests. It's a bit\n> awkward, but I'm guessing this would more closely match future\n> internal atomic locking. Let me know what you think of the concept,\n> and then do whichever way you think is best. (using v53 as the basis)\n\nThank you for verifying this idea! Interesting. While it's promising\nin terms of future atomic locking, I'm concerned it might not be easy\nto use if radix tree APIs supports only such callback style. I believe\nthe caller would like to pass one more data along with val_data. For\nexample, considering tidstore that has num_tids internally, it wants\nto pass both a pointer to BlocktableEntry and a pointer to TidStore\nitself so that it increments the counter while holding a lock.\n\nAnother API idea for future atomic locking is to separate\nRT_SET()/RT_FIND() into begin and end. In RT_SET_BEGIN() API, we find\nthe key, extend nodes if necessary, set the value, and return the\nresult while holding the lock. 
For example, if the radix tree supports\nlock coupling, the leaf node and its parent remain locked. Then the\ncaller does its job and calls RT_SET_END() that does cleanup stuff\nsuch as releasing locks.\n\nI've not fully considered this approach but even this idea seems\ncomplex and easy to use. I prefer the current simple approach as we\nsupport the simple locking mechanism for now.\n\n>\n> I believe this is the only open question remaining. The rest is just\n> polish and testing.\n\nRight.\n\n>\n> > During trying this idea, I realized that there is a visibility problem\n> > in the radix tree template\n>\n> If it's broken even without the embedding I'll look into this (I don't\n> know if this configuration has ever been tested). I think a good test\n> is putting the shared tid tree in it's own translation unit, to see if\n> anything needs to be fixed. I'll go try that.\n\nThanks.\n\nBTW in radixtree.h pg_attribute_unused() is used for some functions,\nbut is it for debugging purposes? I don't see why it's used only for\nsome functions.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 19 Jan 2024 16:26:17 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jan 19, 2024 at 2:26 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Jan 18, 2024 at 1:30 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > I'm not quite sure what the point of \"num_items\" is anymore, because\n> > it was really tied to the array in VacDeadItems. dead_items->num_items\n> > is essential to reading/writing the array correctly. If this number is\n> > wrong, the array is corrupt. There is no such requirement for the\n> > radix tree. We don't need to know the number of tids to add to it or\n> > do a lookup, or anything.\n>\n> True. Sorry I wanted to say \"num_tids\" of TidStore. 
I'm still thinking\n> we need to have the number of TIDs in a tidstore, especially in the\n> tidstore's control object.\n\nHmm, it would be kind of sad to require explicit locking in tidstore.c\nis only for maintaining that one number at all times. Aside from the\ntwo ereports after an index scan / second heap pass, the only\nnon-assert place where it's used is\n\n@@ -1258,7 +1265,7 @@ lazy_scan_heap(LVRelState *vacrel)\n * Do index vacuuming (call each index's ambulkdelete routine), then do\n * related heap vacuuming\n */\n- if (dead_items->num_items > 0)\n+ if (TidStoreNumTids(dead_items) > 0)\n lazy_vacuum(vacrel);\n\n...and that condition can be checked by doing a single step of\niteration to see if it shows anything. But for the ereport, my idea\nfor iteration + popcount is probably quite slow.\n\n> IIUC lpdead_items is the total number of LP_DEAD items vacuumed during\n> the whole lazy vacuum operation whereas num_items is the number of\n> LP_DEAD items vacuumed within one index vacuum and heap vacuum cycle.\n> That is, after heap vacuum, the latter counter is reset while the\n> former counter is not.\n>\n> The latter counter is used in lazyvacuum.c as well as the ereport in\n> vac_bulkdel_one_index().\n\nAh, of course.\n\n> Putting a copy of the key in BlocktableEntry's header is an\n> interesting idea. But the current debug code in the tidstore also\n> makes sure that the tidstore returns TIDs in the correct order during\n> an iterate operation. I think it still has a value and you can disable\n> it by removing the \"#define TIDSTORE_DEBUG\" line.\n\nFair enough. I just thought it'd be less work to leave this out in\ncase we change how locking is called.\n\n> > This week I tried an idea to use a callback there so that after\n> > internal unlocking, the caller received the value (or whatever else\n> > needs to happen, such as lookup an offset in the tid bitmap). I've\n> > attached a draft for that that passes radix tree tests. 
It's a bit\n> > awkward, but I'm guessing this would more closely match future\n> > internal atomic locking. Let me know what you think of the concept,\n> > and then do whichever way you think is best. (using v53 as the basis)\n>\n> Thank you for verifying this idea! Interesting. While it's promising\n> in terms of future atomic locking, I'm concerned it might not be easy\n> to use if radix tree APIs supports only such callback style.\n\nYeah, it's quite awkward. It could be helped by only exposing it for\nvarlen types. For simply returning \"present or not\" (used a lot in the\nregression tests), we could skip the callback if the data is null.\nThat is all also extra stuff.\n\n> I believe\n> the caller would like to pass one more data along with val_data. For\n\nThat's trivial, however, if I understand you correctly. With \"void *\",\na callback can receive anything, including a struct containing\nadditional pointers to elsewhere.\n\n> example, considering tidstore that has num_tids internally, it wants\n> to pass both a pointer to BlocktableEntry and a pointer to TidStore\n> itself so that it increments the counter while holding a lock.\n\nHmm, so a callback to RT_SET also. That's interesting!\n\nAnyway, I agree it needs to be simple, since the first use doesn't\neven have multiple writers.\n\n> BTW in radixtree.h pg_attribute_unused() is used for some functions,\n> but is it for debugging purposes? I don't see why it's used only for\n> some functions.\n\nIt was there to silence warnings about unused functions. 
I only see\none remaining, and it's already behind a debug symbol, so we might not\nneed this attribute anymore.\n\n\n", "msg_date": "Fri, 19 Jan 2024 16:48:23 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I wrote:\n\n> On Thu, Jan 18, 2024 at 8:31 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > During trying this idea, I realized that there is a visibility problem\n> > in the radix tree template\n>\n> If it's broken even without the embedding I'll look into this (I don't\n> know if this configuration has ever been tested). I think a good test\n> is putting the shared tid tree in it's own translation unit, to see if\n> anything needs to be fixed. I'll go try that.\n\nHere's a quick test that this works. The only thing that really needed\nfixing in the template was failure to un-define one symbol. The rest\nwas just moving some things around.", "msg_date": "Fri, 19 Jan 2024 18:27:09 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jan 19, 2024 at 6:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Jan 19, 2024 at 2:26 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Jan 18, 2024 at 1:30 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > I'm not quite sure what the point of \"num_items\" is anymore, because\n> > > it was really tied to the array in VacDeadItems. dead_items->num_items\n> > > is essential to reading/writing the array correctly. If this number is\n> > > wrong, the array is corrupt. There is no such requirement for the\n> > > radix tree. We don't need to know the number of tids to add to it or\n> > > do a lookup, or anything.\n> >\n> > True. Sorry I wanted to say \"num_tids\" of TidStore. 
I'm still thinking\n> > we need to have the number of TIDs in a tidstore, especially in the\n> > tidstore's control object.\n>\n> Hmm, it would be kind of sad to require explicit locking in tidstore.c\n> is only for maintaining that one number at all times. Aside from the\n> two ereports after an index scan / second heap pass, the only\n> non-assert place where it's used is\n>\n> @@ -1258,7 +1265,7 @@ lazy_scan_heap(LVRelState *vacrel)\n> * Do index vacuuming (call each index's ambulkdelete routine), then do\n> * related heap vacuuming\n> */\n> - if (dead_items->num_items > 0)\n> + if (TidStoreNumTids(dead_items) > 0)\n> lazy_vacuum(vacrel);\n>\n> ...and that condition can be checked by doing a single step of\n> iteration to see if it shows anything. But for the ereport, my idea\n> for iteration + popcount is probably quite slow.\n\nRight.\n\nOn further thought, as you pointed out before, \"num_tids\" should not\nbe in tidstore in terms of integration with tidbitmap.c, because\ntidbitmap.c has \"lossy pages\". With lossy pages, \"num_tids\" is no\nlonger accurate and useful. Similarly, looking at tidbitmap.c, it has\nnpages and nchunks but they will not be necessary in lazy vacuum use\ncase. Also, assuming that we support parallel heap pruning, probably\nwe need to somehow lock the tidstore while adding tids to the tidstore\nconcurrently by parallel vacuum worker. But in tidbitmap use case, we\ndon't need to lock the tidstore since it doesn't have multiple\nwriters. Given these facts, different statistics and different lock\nstrategies are required by different use case. So I think there are 3\noptions:\n\n1. expose lock functions for tidstore and the caller manages the\nstatistics in the outside of tidstore. For example, in lazyvacuum.c we\nwould have a TidStore for tid storage as well as VacDeadItemsInfo that\nhas num_tids and max_bytes. Both are in LVRelState. 
For parallel\nvacuum, we pass both to the workers via DSM and pass both to function\nwhere the statistics are required. As for the exposed lock functions,\nwhen adding tids to the tidstore, the caller would need to call\nsomething like TidStoreLockExclusive(ts) that further calls\nLWLockAcquire(ts->tree.shared->ctl.lock, LW_EXCLUSIVE) internally.\n\n2. add callback functions to tidstore so that the caller can do its\nwork while holding a lock on the tidstore. This is like the idea we\njust discussed for radix tree. The caller passes a callback function\nand user data to TidStoreSetBlockOffsets(), and the callback is called\nafter setting tids. Similar to option 1, the statistics need to be\nstored in a different area.\n\n3. keep tidstore.c and tidbitmap.c separate implementations but use\nradix tree in tidbitmap.c. tidstore.c would have \"num_tids\" in its\ncontrol object and doesn't have any lossy page support. On the other\nhand, in tidbitmap.c we replace simplehash with radix tree. This makes\ntidstore.c simple but we would end up having different data structures\nfor similar usage.\n\nI think it's worth trying option 1. What do you think, John?\n\n>\n> > IIUC lpdead_items is the total number of LP_DEAD items vacuumed during\n> > the whole lazy vacuum operation whereas num_items is the number of\n> > LP_DEAD items vacuumed within one index vacuum and heap vacuum cycle.\n> > That is, after heap vacuum, the latter counter is reset while the\n> > former counter is not.\n> >\n> > The latter counter is used in lazyvacuum.c as well as the ereport in\n> > vac_bulkdel_one_index().\n>\n> Ah, of course.\n>\n> > Putting a copy of the key in BlocktableEntry's header is an\n> > interesting idea. But the current debug code in the tidstore also\n> > makes sure that the tidstore returns TIDs in the correct order during\n> > an iterate operation. I think it still has a value and you can disable\n> > it by removing the \"#define TIDSTORE_DEBUG\" line.\n>\n> Fair enough. 
I just thought it'd be less work to leave this out in\n> case we change how locking is called.\n>\n> > > This week I tried an idea to use a callback there so that after\n> > > internal unlocking, the caller received the value (or whatever else\n> > > needs to happen, such as lookup an offset in the tid bitmap). I've\n> > > attached a draft for that that passes radix tree tests. It's a bit\n> > > awkward, but I'm guessing this would more closely match future\n> > > internal atomic locking. Let me know what you think of the concept,\n> > > and then do whichever way you think is best. (using v53 as the basis)\n> >\n> > Thank you for verifying this idea! Interesting. While it's promising\n> > in terms of future atomic locking, I'm concerned it might not be easy\n> > to use if radix tree APIs supports only such callback style.\n>\n> Yeah, it's quite awkward. It could be helped by only exposing it for\n> varlen types. For simply returning \"present or not\" (used a lot in the\n> regression tests), we could skip the callback if the data is null.\n> That is all also extra stuff.\n>\n> > I believe\n> > the caller would like to pass one more data along with val_data. For\n>\n> That's trivial, however, if I understand you correctly. With \"void *\",\n> a callback can receive anything, including a struct containing\n> additional pointers to elsewhere.\n>\n> > example, considering tidstore that has num_tids internally, it wants\n> > to pass both a pointer to BlocktableEntry and a pointer to TidStore\n> > itself so that it increments the counter while holding a lock.\n>\n> Hmm, so a callback to RT_SET also. That's interesting!\n>\n> Anyway, I agree it needs to be simple, since the first use doesn't\n> even have multiple writers.\n\nRight.\n\n>\n> > BTW in radixtree.h pg_attribute_unused() is used for some functions,\n> > but is it for debugging purposes? I don't see why it's used only for\n> > some functions.\n>\n> It was there to silence warnings about unused functions. 
I only see\n> one remaining, and it's already behind a debug symbol, so we might not\n> need this attribute anymore.\n\nOkay.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 22 Jan 2024 12:27:36 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 22, 2024 at 10:28 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On further thought, as you pointed out before, \"num_tids\" should not\n> be in tidstore in terms of integration with tidbitmap.c, because\n> tidbitmap.c has \"lossy pages\". With lossy pages, \"num_tids\" is no\n> longer accurate and useful. Similarly, looking at tidbitmap.c, it has\n> npages and nchunks but they will not be necessary in lazy vacuum use\n> case. Also, assuming that we support parallel heap pruning, probably\n> we need to somehow lock the tidstore while adding tids to the tidstore\n> concurrently by parallel vacuum worker. But in tidbitmap use case, we\n> don't need to lock the tidstore since it doesn't have multiple\n> writers.\n\nNot currently, and it does seem bad to require locking where it's not required.\n\n(That would be a prerequisite for parallel index scan. It's been tried\nbefore with the hash table, but concurrency didn't scale well with the\nhash table. I have no reason to think that the radix tree would scale\nsignificantly better with the same global LW lock, but as you know\nthere are other locking schemes possible.)\n\n> Given these facts, different statistics and different lock\n> strategies are required by different use case. So I think there are 3\n> options:\n>\n> 1. expose lock functions for tidstore and the caller manages the\n> statistics in the outside of tidstore. For example, in lazyvacuum.c we\n> would have a TidStore for tid storage as well as VacDeadItemsInfo that\n> has num_tids and max_bytes. 
Both are in LVRelState. For parallel\n> vacuum, we pass both to the workers via DSM and pass both to function\n> where the statistics are required. As for the exposed lock functions,\n> when adding tids to the tidstore, the caller would need to call\n> something like TidStoreLockExclusive(ts) that further calls\n> LWLockAcquire(ts->tree.shared->ctl.lock, LW_EXCLUSIVE) internally.\n\nThe advantage here is that vacuum can avoid locking entirely while\nusing shared memory, just like it does now, and has the option to add\nit later.\nIIUC, the radix tree struct would have a lock member, but wouldn't\ntake any locks internally? Maybe we still need one for\nRT_MEMORY_USAGE? For that, I see dsa_get_total_size() takes its own\nDSA_AREA_LOCK -- maybe that's enough?\n\nThat seems simplest, and is not very far from what we do now. If we do\nthis, then the lock functions should be where we branch for is_shared.\n\n> 2. add callback functions to tidstore so that the caller can do its\n> work while holding a lock on the tidstore. This is like the idea we\n> just discussed for radix tree. The caller passes a callback function\n> and user data to TidStoreSetBlockOffsets(), and the callback is called\n> after setting tids. Similar to option 1, the statistics need to be\n> stored in a different area.\n\nI think we'll have to move to something like this eventually, but it\nseems like overkill right now.\n\n> 3. keep tidstore.c and tidbitmap.c separate implementations but use\n> radix tree in tidbitmap.c. tidstore.c would have \"num_tids\" in its\n> control object and doesn't have any lossy page support. On the other\n> hand, in tidbitmap.c we replace simplehash with radix tree. This makes\n> tidstore.c simple but we would end up having different data structures\n> for similar usage.\n\nThey have so much in common that it's worth it to use the same\ninterface and (eventually) value type. 
They just need separate paths\nfor adding tids, as we've discussed.\n\n> I think it's worth trying option 1. What do you think, John?\n\n+1\n\n\n", "msg_date": "Mon, 22 Jan 2024 12:35:49 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 17, 2024 at 12:32 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> I wrote:\n>\n> > > Hmm, I wonder if that's a side-effect of the \"create\" functions doing\n> > > their own allocations and returning a pointer. Would it be less tricky\n> > > if the structs were declared where we need them and passed to \"init\"\n> > > functions?\n>\n> If this is a possibility, I thought I'd first send the last (I hope)\n> large-ish set of radix tree cleanups to avoid rebasing issues. I'm not\n> including tidstore/vacuum here, because recent discussion has some\n> up-in-the-air work.\n\nThank you for updating the patches! These updates look good to me.\n\n>\n> Should be self-explanatory, but some thing are worth calling out:\n> 0012 and 0013: Some time ago I started passing insertpos as a\n> parameter, but now see that is not ideal -- when growing from node16\n> to node48 we don't need it at all, so it's a wasted calculation. While\n> reverting that, I found that this also allows passing constants in\n> some cases.\n> 0014 makes a cleaner separation between adding a child and growing a\n> node, resulting in more compact-looking functions.\n> 0019 is a bit unpolished, but I realized that it's pointless to assign\n> a zero child when further up the call stack we overwrite it anyway\n> with the actual value. With this, that assignment is skipped. 
This\n> makes some comments and names strange, so needs a bit of polish, but\n> wanted to get it out there anyway.\n\nCool.\n\nI'll merge these patches in the next version v54 patch set.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 22 Jan 2024 16:00:08 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 22, 2024 at 2:36 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Jan 22, 2024 at 10:28 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On further thought, as you pointed out before, \"num_tids\" should not\n> > be in tidstore in terms of integration with tidbitmap.c, because\n> > tidbitmap.c has \"lossy pages\". With lossy pages, \"num_tids\" is no\n> > longer accurate and useful. Similarly, looking at tidbitmap.c, it has\n> > npages and nchunks but they will not be necessary in lazy vacuum use\n> > case. Also, assuming that we support parallel heap pruning, probably\n> > we need to somehow lock the tidstore while adding tids to the tidstore\n> > concurrently by parallel vacuum worker. But in tidbitmap use case, we\n> > don't need to lock the tidstore since it doesn't have multiple\n> > writers.\n>\n> Not currently, and it does seem bad to require locking where it's not required.\n>\n> (That would be a prerequisite for parallel index scan. It's been tried\n> before with the hash table, but concurrency didn't scale well with the\n> hash table. I have no reason to think that the radix tree would scale\n> significantly better with the same global LW lock, but as you know\n> there are other locking schemes possible.)\n>\n> > Given these facts, different statistics and different lock\n> > strategies are required by different use case. So I think there are 3\n> > options:\n> >\n> > 1. 
expose lock functions for tidstore and the caller manages the\n> > statistics in the outside of tidstore. For example, in lazyvacuum.c we\n> > would have a TidStore for tid storage as well as VacDeadItemsInfo that\n> > has num_tids and max_bytes. Both are in LVRelState. For parallel\n> > vacuum, we pass both to the workers via DSM and pass both to function\n> > where the statistics are required. As for the exposed lock functions,\n> > when adding tids to the tidstore, the caller would need to call\n> > something like TidStoreLockExclusive(ts) that further calls\n> > LWLockAcquire(ts->tree.shared->ctl.lock, LW_EXCLUSIVE) internally.\n>\n> The advantage here is that vacuum can avoid locking entirely while\n> using shared memory, just like it does now, and has the option to add\n> it later.\n\nTrue.\n\n> IIUC, the radix tree struct would have a lock member, but wouldn't\n> take any locks internally? Maybe we still need one for\n> RT_MEMORY_USAGE? For that, I see dsa_get_total_size() takes its own\n> DSA_AREA_LOCK -- maybe that's enough?\n\nI think that's a good point. So there will be no place where the radix\ntree takes any locks internally.\n\n>\n> That seems simplest, and is not very far from what we do now. If we do\n> this, then the lock functions should be where we branch for is_shared.\n\nAgreed.\n\n>\n> > 2. add callback functions to tidstore so that the caller can do its\n> > work while holding a lock on the tidstore. This is like the idea we\n> > just discussed for radix tree. The caller passes a callback function\n> > and user data to TidStoreSetBlockOffsets(), and the callback is called\n> > after setting tids. Similar to option 1, the statistics need to be\n> > stored in a different area.\n>\n> I think we'll have to move to something like this eventually, but it\n> seems like overkill right now.\n\nRight.\n\n>\n> > 3. keep tidstore.c and tidbitmap.c separate implementations but use\n> > radix tree in tidbitmap.c. 
tidstore.c would have \"num_tids\" in its\n> > control object and doesn't have any lossy page support. On the other\n> > hand, in tidbitmap.c we replace simplehash with radix tree. This makes\n> > tidstore.c simple but we would end up having different data structures\n> > for similar usage.\n>\n> They have so much in common that it's worth it to use the same\n> interface and (eventually) value type. They just need separate paths\n> for adding tids, as we've discussed.\n\nAgreed.\n\n>\n> > I think it's worth trying option 1. What do you think, John?\n>\n> +1\n\nThanks!\n\nBefore working on this idea, since the latest patches conflict with\nthe current HEAD, I share the latest patch set (v54). Here is the\nsummary:\n\n- As for radix tree part, it's based on v53 patch. I've squashed most\nof cleanups and changes in v53 except for \"DRAFT: Stop using invalid\npointers as placeholders.\" as I thought you might want to still work\non it. BTW it includes \"#undef RT_SHMEM\".\n- As for tidstore, it's based on v51. That is, it still has the\ncontrol object and num_tids there.\n- As for vacuum integration, it's also based on v51. But we no longer\nneed to change has_lpdead_items and LVPagePruneState thanks to the\nrecent commit c120550edb8 and e313a61137.\n\nFor the next version patch, I'll work on this idea and try to clean up\nlocking stuff both in tidstore and radix tree. Or if you're already\nworking on some of them, please let me know. I'll review it.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 22 Jan 2024 16:23:43 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 22, 2024 at 2:24 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> For the next version patch, I'll work on this idea and try to clean up\n> locking stuff both in tidstore and radix tree. 
Or if you're already\n> working on some of them, please let me know. I'll review it.\n\nOkay go ahead, sounds good. I plan to look at the tests since they\nhaven't been looked at in a while.\n\n\n", "msg_date": "Mon, 22 Jan 2024 15:18:03 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 22, 2024 at 5:18 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Jan 22, 2024 at 2:24 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > For the next version patch, I'll work on this idea and try to clean up\n> > locking stuff both in tidstore and radix tree. Or if you're already\n> > working on some of them, please let me know. I'll review it.\n>\n> Okay go ahead, sounds good. I plan to look at the tests since they\n> haven't been looked at in a while.\n\nI've attached the latest patch set. Here are updates from v54 patch:\n\n0005 - Expose radix tree lock functions and remove all locks taken\ninternally in radixtree.h.\n0008 - Remove tidstore's control object.\n0009 - Add tidstore lock functions.\n0011 - Add VacDeadItemsInfo to store \"max_bytes\" and \"num_items\"\nseparate from TidStore. 
Also make lazy vacuum and parallel vacuum use\nit.\n\nThe new patches probably need to be polished but the VacDeadItemInfo\nidea looks good to me.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 23 Jan 2024 12:58:00 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 23, 2024 at 12:58 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Jan 22, 2024 at 5:18 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Mon, Jan 22, 2024 at 2:24 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > For the next version patch, I'll work on this idea and try to clean up\n> > > locking stuff both in tidstore and radix tree. Or if you're already\n> > > working on some of them, please let me know. I'll review it.\n> >\n> > Okay go ahead, sounds good. I plan to look at the tests since they\n> > haven't been looked at in a while.\n>\n> I've attached the latest patch set. Here are updates from v54 patch:\n>\n> 0005 - Expose radix tree lock functions and remove all locks taken\n> internally in radixtree.h.\n> 0008 - Remove tidstore's control object.\n> 0009 - Add tidstore lock functions.\n> 0011 - Add VacDeadItemsInfo to store \"max_bytes\" and \"num_items\"\n> separate from TidStore. Also make lazy vacuum and parallel vacuum use\n> it.\n\nJohn pointed out offlist the tarball includes only the patches up to\n0009. 
I've attached the correct one.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 23 Jan 2024 16:48:25 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 23, 2024 at 10:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> The new patches probably need to be polished but the VacDeadItemInfo\n> idea looks good to me.\n\nThat idea looks good to me, too. Since you already likely know what\nyou'd like to polish, I don't have much to say except for a few\nquestions below. I also did a quick sweep through every patch, so some\nof these comments are unrelated to recent changes:\n\nv55-0003:\n\n+size_t\n+dsa_get_total_size(dsa_area *area)\n+{\n+ size_t size;\n+\n+ LWLockAcquire(DSA_AREA_LOCK(area), LW_SHARED);\n+ size = area->control->total_segment_size;\n+ LWLockRelease(DSA_AREA_LOCK(area));\n\nI looked and found dsa.c doesn't already use shared locks in HEAD,\neven dsa_dump. Not sure why that is...\n\n+/*\n+ * Calculate the slab blocksize so that we can allocate at least 32 chunks\n+ * from the block.\n+ */\n+#define RT_SLAB_BLOCK_SIZE(size) \\\n+ Max((SLAB_DEFAULT_BLOCK_SIZE / (size)) * (size), (size) * 32)\n\nThe first parameter seems to be trying to make the block size exact,\nbut that's not right, because of the chunk header, and maybe\nalignment. If the default block size is big enough to waste only a\ntiny amount of space, let's just use that as-is. 
Also, I think all\nblock sizes in the code base have been a power of two, but I'm not\nsure how much that matters.\n\n+#ifdef RT_SHMEM\n+ fprintf(stderr, \" [%d] chunk %x slot \" DSA_POINTER_FORMAT \"\\n\",\n+ i, n4->chunks[i], n4->children[i]);\n+#else\n+ fprintf(stderr, \" [%d] chunk %x slot %p\\n\",\n+ i, n4->chunks[i], n4->children[i]);\n+#endif\n\nMaybe we could invent a child pointer format, so we only #ifdef in one place.\n\n--- /dev/null\n+++ b/src/test/modules/test_radixtree/meson.build\n@@ -0,0 +1,35 @@\n+# FIXME: prevent install during main install, but not during test :/\n\nCan you look into this?\n\ntest_radixtree.c:\n\n+/*\n+ * XXX: should we expose and use RT_SIZE_CLASS and RT_SIZE_CLASS_INFO?\n+ */\n+static int rt_node_class_fanouts[] = {\n+ 4, /* RT_CLASS_3 */\n+ 15, /* RT_CLASS_32_MIN */\n+ 32, /* RT_CLASS_32_MAX */\n+ 125, /* RT_CLASS_125 */\n+ 256 /* RT_CLASS_256 */\n+};\n\nThese numbers have been wrong a long time, too, but only matters for\nfiguring out where it went wrong when something is broken. And for the\nXXX, instead of trying to use the largest number that should fit (it's\nobviously not testing that the expected node can actually hold that\nnumber anyway), it seems we can just use a \"big enough\" number to\ncause growing into the desired size class.\n\nAs far as cleaning up the tests, I always wondered why these didn't\nuse EXPECT_TRUE, EXPECT_FALSE, etc. as in Andres's prototype where\nwhere convenient, and leave comments above the tests. 
That seemed like\na good idea to me -- was there a reason to have hand-written branches\nand elog messages everywhere?\n\n--- a/src/tools/pginclude/cpluspluscheck\n+++ b/src/tools/pginclude/cpluspluscheck\n@@ -101,6 +101,12 @@ do\n test \"$f\" = src/include/nodes/nodetags.h && continue\n test \"$f\" = src/backend/nodes/nodetags.h && continue\n\n+ # radixtree_*_impl.h cannot be included standalone: they are just\ncode fragments.\n+ test \"$f\" = src/include/lib/radixtree_delete_impl.h && continue\n+ test \"$f\" = src/include/lib/radixtree_insert_impl.h && continue\n+ test \"$f\" = src/include/lib/radixtree_iter_impl.h && continue\n+ test \"$f\" = src/include/lib/radixtree_search_impl.h && continue\n\nHa! I'd forgotten about these -- they're long outdated.\n\nv55-0005:\n\n- * The radix tree is locked in shared mode during the iteration, so\n- * RT_END_ITERATE needs to be called when finished to release the lock.\n+ * The caller needs to acquire a lock in shared mode during the iteration\n+ * if necessary.\n\n\"need if necessary\" is maybe better phrased as \"is the caller's responsibility\"\n\n+ /*\n+ * We can rely on DSA_AREA_LOCK to get the total amount of DSA memory.\n+ */\n total = dsa_get_total_size(tree->dsa);\n\nMaybe better to have a header comment for RT_MEMORY_USAGE that the\ncaller doesn't need to take a lock.\n\nv55-0006:\n\n\"WIP: Not built, since some benchmarks have broken\" -- I'll work on\nthis when I re-run some benchmarks.\n\nv55-0007:\n\n+ * Internally, a tid is encoded as a pair of 64-bit key and 64-bit value,\n+ * and stored in the radix tree.\n\nThis hasn't been true for a few months now, and I thought we fixed\nthis in some earlier version?\n\n+ * TODO: The caller must be certain that no other backend will attempt to\n+ * access the TidStore before calling this function. Other backend must\n+ * explicitly call TidStoreDetach() to free up backend-local memory associated\n+ * with the TidStore. 
The backend that calls TidStoreDestroy() must not call\n+ * TidStoreDetach().\n\nDo we need to do anything now?\n\nv55-0008:\n\n-TidStoreAttach(dsa_area *area, TidStoreHandle handle)\n+TidStoreAttach(dsa_area *area, dsa_pointer rt_dp)\n\n\"handle\" seemed like a fine name. Is that not the case anymore? The\nnew one is kind of cryptic. The commit message just says \"remove\ncontrol object\" -- does that imply that we need to think of this\nparameter differently, or is it unrelated? (Same with\ndead_items_handle in 0011)\n\nv55-0011:\n\n+ /*\n+ * Recreate the tidstore with the same max_bytes limitation. We cannot\n+ * use neither maintenance_work_mem nor autovacuum_work_mem as they could\n+ * already be changed.\n+ */\n\nI don't understand this part.\n\n\n", "msg_date": "Wed, 24 Jan 2024 13:42:43 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 24, 2024 at 3:42 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Jan 23, 2024 at 10:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > The new patches probably need to be polished but the VacDeadItemInfo\n> > idea looks good to me.\n>\n> That idea looks good to me, too. Since you already likely know what\n> you'd like to polish, I don't have much to say except for a few\n> questions below. I also did a quick sweep through every patch, so some\n> of these comments are unrelated to recent changes:\n\nThank you!\n\n>\n> v55-0003:\n>\n> +size_t\n> +dsa_get_total_size(dsa_area *area)\n> +{\n> + size_t size;\n> +\n> + LWLockAcquire(DSA_AREA_LOCK(area), LW_SHARED);\n> + size = area->control->total_segment_size;\n> + LWLockRelease(DSA_AREA_LOCK(area));\n>\n> I looked and found dsa.c doesn't already use shared locks in HEAD,\n> even dsa_dump. Not sure why that is...\n\nOh, the dsa_dump part seems to be a bug. 
But it'll keep it consistent\nwith others.\n\n>\n> +/*\n> + * Calculate the slab blocksize so that we can allocate at least 32 chunks\n> + * from the block.\n> + */\n> +#define RT_SLAB_BLOCK_SIZE(size) \\\n> + Max((SLAB_DEFAULT_BLOCK_SIZE / (size)) * (size), (size) * 32)\n>\n> The first parameter seems to be trying to make the block size exact,\n> but that's not right, because of the chunk header, and maybe\n> alignment. If the default block size is big enough to waste only a\n> tiny amount of space, let's just use that as-is.\n\nAgreed.\n\n> Also, I think all\n> block sizes in the code base have been a power of two, but I'm not\n> sure how much that matters.\n\nDid you mean all slab block sizes we use in radixtree.h?\n\n>\n> +#ifdef RT_SHMEM\n> + fprintf(stderr, \" [%d] chunk %x slot \" DSA_POINTER_FORMAT \"\\n\",\n> + i, n4->chunks[i], n4->children[i]);\n> +#else\n> + fprintf(stderr, \" [%d] chunk %x slot %p\\n\",\n> + i, n4->chunks[i], n4->children[i]);\n> +#endif\n>\n> Maybe we could invent a child pointer format, so we only #ifdef in one place.\n\nWIll change.\n\n>\n> --- /dev/null\n> +++ b/src/test/modules/test_radixtree/meson.build\n> @@ -0,0 +1,35 @@\n> +# FIXME: prevent install during main install, but not during test :/\n>\n> Can you look into this?\n\nOkay, I'll look at it.\n\n>\n> test_radixtree.c:\n>\n> +/*\n> + * XXX: should we expose and use RT_SIZE_CLASS and RT_SIZE_CLASS_INFO?\n> + */\n> +static int rt_node_class_fanouts[] = {\n> + 4, /* RT_CLASS_3 */\n> + 15, /* RT_CLASS_32_MIN */\n> + 32, /* RT_CLASS_32_MAX */\n> + 125, /* RT_CLASS_125 */\n> + 256 /* RT_CLASS_256 */\n> +};\n>\n> These numbers have been wrong a long time, too, but only matters for\n> figuring out where it went wrong when something is broken. 
And for the\n> XXX, instead of trying to use the largest number that should fit (it's\n> obviously not testing that the expected node can actually hold that\n> number anyway), it seems we can just use a \"big enough\" number to\n> cause growing into the desired size class.\n>\n> As far as cleaning up the tests, I always wondered why these didn't\n> use EXPECT_TRUE, EXPECT_FALSE, etc. as in Andres's prototype where\n> where convenient, and leave comments above the tests. That seemed like\n> a good idea to me -- was there a reason to have hand-written branches\n> and elog messages everywhere?\n\nThe current test is based on test_integerset. I agree that we can\nimprove it by using EXPECT_TRUE etc.\n\n>\n> --- a/src/tools/pginclude/cpluspluscheck\n> +++ b/src/tools/pginclude/cpluspluscheck\n> @@ -101,6 +101,12 @@ do\n> test \"$f\" = src/include/nodes/nodetags.h && continue\n> test \"$f\" = src/backend/nodes/nodetags.h && continue\n>\n> + # radixtree_*_impl.h cannot be included standalone: they are just\n> code fragments.\n> + test \"$f\" = src/include/lib/radixtree_delete_impl.h && continue\n> + test \"$f\" = src/include/lib/radixtree_insert_impl.h && continue\n> + test \"$f\" = src/include/lib/radixtree_iter_impl.h && continue\n> + test \"$f\" = src/include/lib/radixtree_search_impl.h && continue\n>\n> Ha! 
I'd forgotten about these -- they're long outdated.\n\nWill remove.\n\n>\n> v55-0005:\n>\n> - * The radix tree is locked in shared mode during the iteration, so\n> - * RT_END_ITERATE needs to be called when finished to release the lock.\n> + * The caller needs to acquire a lock in shared mode during the iteration\n> + * if necessary.\n>\n> \"need if necessary\" is maybe better phrased as \"is the caller's responsibility\"\n\nWill fix.\n\n>\n> + /*\n> + * We can rely on DSA_AREA_LOCK to get the total amount of DSA memory.\n> + */\n> total = dsa_get_total_size(tree->dsa);\n>\n> Maybe better to have a header comment for RT_MEMORY_USAGE that the\n> caller doesn't need to take a lock.\n\nWill fix.\n\n>\n> v55-0006:\n>\n> \"WIP: Not built, since some benchmarks have broken\" -- I'll work on\n> this when I re-run some benchmarks.\n\nThanks!\n\n>\n> v55-0007:\n>\n> + * Internally, a tid is encoded as a pair of 64-bit key and 64-bit value,\n> + * and stored in the radix tree.\n>\n> This hasn't been true for a few months now, and I thought we fixed\n> this in some earlier version?\n\nYeah, I'll fix it.\n\n>\n> + * TODO: The caller must be certain that no other backend will attempt to\n> + * access the TidStore before calling this function. Other backend must\n> + * explicitly call TidStoreDetach() to free up backend-local memory associated\n> + * with the TidStore. The backend that calls TidStoreDestroy() must not call\n> + * TidStoreDetach().\n>\n> Do we need to do anything now?\n\nNo, will remove it.\n\n>\n> v55-0008:\n>\n> -TidStoreAttach(dsa_area *area, TidStoreHandle handle)\n> +TidStoreAttach(dsa_area *area, dsa_pointer rt_dp)\n>\n> \"handle\" seemed like a fine name. Is that not the case anymore? The\n> new one is kind of cryptic. The commit message just says \"remove\n> control object\" -- does that imply that we need to think of this\n> parameter differently, or is it unrelated? 
(Same with\n> dead_items_handle in 0011)\n\nSince it's actually just a radix tree's handle it was kind of\nunnatural to me to use the same dsa_pointer as different handles. But\nrethinking it, I agree \"handle\" is a fine name.\n\n>\n> v55-0011:\n>\n> + /*\n> + * Recreate the tidstore with the same max_bytes limitation. We cannot\n> + * use neither maintenance_work_mem nor autovacuum_work_mem as they could\n> + * already be changed.\n> + */\n>\n> I don't understand this part.\n\nI wanted to mean that if maintenance_work_mem is changed and the\nconfig file is reloaded, its value could no longer be the same as the\none that we used when initializing the parallel vacuum. That's why we\nneed to store max_bytes in the DSM. I'll rephrase it.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 26 Jan 2024 23:05:54 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Jan 26, 2024 at 11:05 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Wed, Jan 24, 2024 at 3:42 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Tue, Jan 23, 2024 at 10:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > The new patches probably need to be polished but the VacDeadItemInfo\n> > > idea looks good to me.\n> >\n> > That idea looks good to me, too. Since you already likely know what\n> > you'd like to polish, I don't have much to say except for a few\n> > questions below. 
I also did a quick sweep through every patch, so some\n> > of these comments are unrelated to recent changes:\n>\n> Thank you!\n>\n> >\n> > +/*\n> > + * Calculate the slab blocksize so that we can allocate at least 32 chunks\n> > + * from the block.\n> > + */\n> > +#define RT_SLAB_BLOCK_SIZE(size) \\\n> > + Max((SLAB_DEFAULT_BLOCK_SIZE / (size)) * (size), (size) * 32)\n> >\n> > The first parameter seems to be trying to make the block size exact,\n> > but that's not right, because of the chunk header, and maybe\n> > alignment. If the default block size is big enough to waste only a\n> > tiny amount of space, let's just use that as-is.\n>\n> Agreed.\n>\n\nAs of v55 patch, the sizes of each node class are:\n\n- node4: 40 bytes\n- node16_lo: 168 bytes\n- node16_hi: 296 bytes\n- node48: 784 bytes\n- node256: 2088 bytes\n\nIf we use SLAB_DEFAULT_BLOCK_SIZE (8kB) for each node class, we waste\n(approximately):\n\n- node4: 32 bytes\n- node16_lo: 128 bytes\n- node16_hi: 200 bytes\n- node48: 352 bytes\n- node256: 1928 bytes\n\nWe might want to calculate a better slab block size for node256 at least.\n\n> >\n> > + * TODO: The caller must be certain that no other backend will attempt to\n> > + * access the TidStore before calling this function. Other backend must\n> > + * explicitly call TidStoreDetach() to free up backend-local memory associated\n> > + * with the TidStore. The backend that calls TidStoreDestroy() must not call\n> > + * TidStoreDetach().\n> >\n> > Do we need to do anything now?\n>\n> No, will remove it.\n>\n\nI misunderstood something. I think the above statement is still true\nbut we don't need to do anything at this stage. It's a typical usage\nthat the leader destroys the shared data after confirming all workers\nare detached. 
It's not a TODO but probably a NOTE.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 29 Jan 2024 16:29:10 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 29, 2024 at 2:29 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> > > +/*\n> > > + * Calculate the slab blocksize so that we can allocate at least 32 chunks\n> > > + * from the block.\n> > > + */\n> > > +#define RT_SLAB_BLOCK_SIZE(size) \\\n> > > + Max((SLAB_DEFAULT_BLOCK_SIZE / (size)) * (size), (size) * 32)\n> > >\n> > > The first parameter seems to be trying to make the block size exact,\n> > > but that's not right, because of the chunk header, and maybe\n> > > alignment. If the default block size is big enough to waste only a\n> > > tiny amount of space, let's just use that as-is.\n\n> If we use SLAB_DEFAULT_BLOCK_SIZE (8kB) for each node class, we waste\n> [snip]\n> We might want to calculate a better slab block size for node256 at least.\n\nI meant the macro could probably be\n\nMax(SLAB_DEFAULT_BLOCK_SIZE, (size) * N)\n\n(Right now N=32). I also realize I didn't answer your question earlier\nabout block sizes being powers of two. I was talking about PG in\ngeneral -- I was thinking all block sizes were powers of two. If\nthat's true, I'm not sure if it's because programmers find the macro\ncalculations easy to reason about, or if there was an implementation\nreason for it (e.g. libc behavior). 32*2088 bytes is about 65kB, or\njust above a power of two, so if we did round that up it would be\n128kB.\n\n> > > + * TODO: The caller must be certain that no other backend will attempt to\n> > > + * access the TidStore before calling this function. Other backend must\n> > > + * explicitly call TidStoreDetach() to free up backend-local memory associated\n> > > + * with the TidStore. 
The backend that calls TidStoreDestroy() must not call\n> > > + * TidStoreDetach().\n> > >\n> > > Do we need to do anything now?\n> >\n> > No, will remove it.\n> >\n>\n> I misunderstood something. I think the above statement is still true\n> but we don't need to do anything at this stage. It's a typical usage\n> that the leader destroys the shared data after confirming all workers\n> are detached. It's not a TODO but probably a NOTE.\n\nOkay.\n\n\n", "msg_date": "Mon, 29 Jan 2024 18:48:27 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Jan 29, 2024 at 8:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Jan 29, 2024 at 2:29 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > > +/*\n> > > > + * Calculate the slab blocksize so that we can allocate at least 32 chunks\n> > > > + * from the block.\n> > > > + */\n> > > > +#define RT_SLAB_BLOCK_SIZE(size) \\\n> > > > + Max((SLAB_DEFAULT_BLOCK_SIZE / (size)) * (size), (size) * 32)\n> > > >\n> > > > The first parameter seems to be trying to make the block size exact,\n> > > > but that's not right, because of the chunk header, and maybe\n> > > > alignment. If the default block size is big enough to waste only a\n> > > > tiny amount of space, let's just use that as-is.\n>\n> > If we use SLAB_DEFAULT_BLOCK_SIZE (8kB) for each node class, we waste\n> > [snip]\n> > We might want to calculate a better slab block size for node256 at least.\n>\n> I meant the macro could probably be\n>\n> Max(SLAB_DEFAULT_BLOCK_SIZE, (size) * N)\n>\n> (Right now N=32). I also realize I didn't answer your question earlier\n> about block sizes being powers of two. I was talking about PG in\n> general -- I was thinking all block sizes were powers of two. 
If\n> that's true, I'm not sure if it's because programmers find the macro\n> calculations easy to reason about, or if there was an implementation\n> reason for it (e.g. libc behavior). 32*2088 bytes is about 65kB, or\n> just above a power of two, so if we did round that up it would be\n> 128kB.\n\nThank you for your explanation. It might be better to follow other\ncodes. Does the calculation below make sense to you?\n\nRT_SIZE_CLASS_ELEM size_class = RT_SIZE_CLASS_INFO[i];\nSize inner_blocksize = SLAB_DEFAULT_BLOCK_SIZE;\nwhile (inner_blocksize < 32 * size_class.allocsize)\n inner_blocksize <<= 1;\n\nAs for the lock mode in dsa.c, I've posted a question[1].\n\nRegards,\n\n[1] https://www.postgresql.org/message-id/CAD21AoALgrU2sGWzgq%2B6G9X0ynqyVOjMR5_k4HgsGRWae1j%3DwQ%40mail.gmail.com\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 30 Jan 2024 09:55:35 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 30, 2024 at 7:56 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Jan 29, 2024 at 8:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > I meant the macro could probably be\n> >\n> > Max(SLAB_DEFAULT_BLOCK_SIZE, (size) * N)\n> >\n> > (Right now N=32). I also realize I didn't answer your question earlier\n> > about block sizes being powers of two. I was talking about PG in\n> > general -- I was thinking all block sizes were powers of two. If\n> > that's true, I'm not sure if it's because programmers find the macro\n> > calculations easy to reason about, or if there was an implementation\n> > reason for it (e.g. libc behavior). 32*2088 bytes is about 65kB, or\n> > just above a power of two, so if we did round that up it would be\n> > 128kB.\n>\n> Thank you for your explanation. It might be better to follow other\n> codes. 
Does the calculation below make sense to you?\n>\n> RT_SIZE_CLASS_ELEM size_class = RT_SIZE_CLASS_INFO[i];\n> Size inner_blocksize = SLAB_DEFAULT_BLOCK_SIZE;\n> while (inner_blocksize < 32 * size_class.allocsize)\n> inner_blocksize <<= 1;\n\nIt does make sense, but we can do it more simply:\n\nMax(SLAB_DEFAULT_BLOCK_SIZE, pg_nextpower2_32(size * 32))\n\n\n", "msg_date": "Tue, 30 Jan 2024 17:20:11 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Jan 30, 2024 at 7:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Jan 30, 2024 at 7:56 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Jan 29, 2024 at 8:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > I meant the macro could probably be\n> > >\n> > > Max(SLAB_DEFAULT_BLOCK_SIZE, (size) * N)\n> > >\n> > > (Right now N=32). I also realize I didn't answer your question earlier\n> > > about block sizes being powers of two. I was talking about PG in\n> > > general -- I was thinking all block sizes were powers of two. If\n> > > that's true, I'm not sure if it's because programmers find the macro\n> > > calculations easy to reason about, or if there was an implementation\n> > > reason for it (e.g. libc behavior). 32*2088 bytes is about 65kB, or\n> > > just above a power of two, so if we did round that up it would be\n> > > 128kB.\n> >\n> > Thank you for your explanation. It might be better to follow other\n> > codes. Does the calculation below make sense to you?\n> >\n> > RT_SIZE_CLASS_ELEM size_class = RT_SIZE_CLASS_INFO[i];\n> > Size inner_blocksize = SLAB_DEFAULT_BLOCK_SIZE;\n> > while (inner_blocksize < 32 * size_class.allocsize)\n> > inner_blocksize <<= 1;\n>\n> It does make sense, but we can do it more simply:\n>\n> Max(SLAB_DEFAULT_BLOCK_SIZE, pg_nextpower2_32(size * 32))\n\nThanks!\n\nI've attached the new patch set (v56). 
I've squashed previous updates\nand addressed review comments on v55 in separate patches. Here are the\nupdate summary:\n\n0004: fix compiler warning caught by ci test.\n0005-0008: address review comments on radix tree codes.\n0009: cleanup #define and #undef\n0010: use TEST_SHARED_RT macro for shared radix tree test. RT_SHMEM is\nundefined after including radixtree.h so we should not use it in test\ncode.\n0013-0015: address review comments on tidstore codes.\n0017-0018: address review comments on vacuum integration codes.\n\nLooking at overall changes, there are still XXX and TODO comments in\nradixtree.h:\n\n---\n * XXX There are 4 node kinds, and this should never be increased,\n * for several reasons:\n * 1. With 5 or more kinds, gcc tends to use a jump table for switch\n * statements.\n * 2. The 4 kinds can be represented with 2 bits, so we have the option\n * in the future to tag the node pointer with the kind, even on\n * platforms with 32-bit pointers. This might speed up node traversal\n * in trees with highly random node kinds.\n * 3. We can have multiple size classes per node kind.\n\nCan we just remove \"XXX\"?\n\n---\n * WIP: notes about traditional radix tree trading off span vs height...\n\nAre you going to write it?\n\n---\n#ifdef RT_SHMEM\n/* WIP: do we really need this? */\ntypedef dsa_pointer RT_HANDLE;\n#endif\n\nI think it's worth having it.\n\n---\n * WIP: The paper uses at most 64 for this node kind. \"isset\" happens to fit\n * inside a single bitmapword on most platforms, so it's a good starting\n * point. 
We can make it higher if we need to.\n */\n#define RT_FANOUT_48_MAX (RT_NODE_MAX_SLOTS / 4)\n\nAre you going to work something on this?\n\n---\n /* WIP: We could go first to the higher node16 size class */\n newnode = RT_ALLOC_NODE(tree, RT_NODE_KIND_16, RT_CLASS_16_LO);\n\nDoes it mean to go to RT_CLASS_16_HI and then further go to\nRT_CLASS_16_LO upon further deletion?\n\n---\n * TODO: The current locking mechanism is not optimized for high concurrency\n * with mixed read-write workloads. In the future it might be worthwhile\n * to replace it with the Optimistic Lock Coupling or ROWEX mentioned in\n * the paper \"The ART of Practical Synchronization\" by the same authors as\n * the ART paper, 2016.\n\nI think it's not TODO for now, but a future improvement. We can remove it.\n\n---\n/* TODO: consider 5 with subclass 1 or 2. */\n#define RT_FANOUT_4 4\n\nIs there something we need to do here?\n\n---\n/*\n * Return index of the chunk and slot arrays for inserting into the node,\n * such that the chunk array remains ordered.\n * TODO: Improve performance for non-SIMD platforms.\n */\n\nAre you going to work on this?\n\n---\n/* Delete the element at 'idx' */\n/* TODO: replace slow memmove's */\n\nAre you going to work on this?\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Wed, 31 Jan 2024 14:49:21 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Jan 31, 2024 at 12:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> I've attached the new patch set (v56). I've squashed previous updates\n> and addressed review comments on v55 in separate patches. Here are the\n> update summary:\n>\n> 0004: fix compiler warning caught by ci test.\n> 0005-0008: address review comments on radix tree codes.\n> 0009: cleanup #define and #undef\n> 0010: use TEST_SHARED_RT macro for shared radix tree test. 
RT_SHMEM is\n> undefined after including radixtree.h so we should not use it in test\n> code.\n\nGreat, thanks!\n\nI have a few questions and comments on v56, then I'll address yours\nbelow with the attached v57, which is mostly cosmetic adjustments.\n\nv56-0003:\n\n(Looking closer at tests)\n\n+static const bool rt_test_stats = false;\n\nI'm thinking we should just remove everything that depends on this,\nand keep this module entirely about correctness.\n\n+ for (int shift = 0; shift <= (64 - 8); shift += 8)\n+ test_node_types(shift);\n\nI'm not sure what the test_node_types_* functions are testing that\ntest_basic doesn't. They have a different, and confusing, way to stop\nat every size class and check the keys/values. It seems we can replace\nall that with two more calls (asc/desc) to test_basic, with the\nmaximum level.\n\nIt's pretty hard to see what test_pattern() is doing, or why it's\nuseful. I wonder if instead the test could use something like the\nbenchmark where random integers are masked off. That seems simpler. I\ncan work on that, but I'd like to hear your side about test_pattern().\n\nv56-0007:\n\n+ *\n+ * Since we can rely on DSA_AREA_LOCK to get the total amount of DSA memory,\n+ * the caller doesn't need to take a lock.\n\nMaybe something like \"Since dsa_get_total_size() does appropriate locking ...\"?\n\nv56-0008\n\nThanks, I like how the tests look now.\n\n-NOTICE: testing node 4 with height 0 and ascending keys\n...\n+NOTICE: testing node 1 with height 0 and ascending keys\n\nNow that the number is not intended to match a size class, \"node X\"\nseems out of place. 
Maybe we could have a separate array with strings?\n\n+ 1, /* RT_CLASS_4 */\n\nThis should be more than one, so that the basic test still exercises\npaths that shift elements around.\n\n+ 100, /* RT_CLASS_48 */\n\nThis node currently holds 64 for local memory.\n\n+ 255 /* RT_CLASS_256 */\n\nThis is the only one where we know exactly how many it can take, so\nmay as well keep it at 256.\n\nv56-0012:\n\nThe test module for tidstore could use a few more comments.\n\nv56-0015:\n\n+typedef dsa_pointer TidStoreHandle;\n+\n\n-TidStoreAttach(dsa_area *area, dsa_pointer rt_dp)\n+TidStoreAttach(dsa_area *area, TidStoreHandle handle)\n {\n TidStore *ts;\n+ dsa_pointer rt_dp = handle;\n\nMy earlier opinion was that \"handle\" was a nicer variable name, but\nthis brings back the typedef and also keeps the variable name I didn't\nlike, but pushes it down into the function. I'm a bit confused, so\nI've kept these not-squashed for now.\n\n-----------------------------------------------------------------------------------\n\nNow, for v57:\n\n> Looking at overall changes, there are still XXX and TODO comments in\n> radixtree.h:\n\nThat's fine, as long as it's intentional as a message to readers. That\nsaid, we can get rid of some:\n\n> ---\n> * XXX There are 4 node kinds, and this should never be increased,\n> * for several reasons:\n> * 1. With 5 or more kinds, gcc tends to use a jump table for switch\n> * statements.\n> * 2. The 4 kinds can be represented with 2 bits, so we have the option\n> * in the future to tag the node pointer with the kind, even on\n> * platforms with 32-bit pointers. This might speed up node traversal\n> * in trees with highly random node kinds.\n> * 3. 
We can have multiple size classes per node kind.\n>\n> Can we just remove \"XXX\"?\n\nHow about \"NOTE\"?\n\n> ---\n> * WIP: notes about traditional radix tree trading off span vs height...\n>\n> Are you going to write it?\n\nYes, when I draft a rough commit message, (for next time).\n\n> ---\n> #ifdef RT_SHMEM\n> /* WIP: do we really need this? */\n> typedef dsa_pointer RT_HANDLE;\n> #endif\n>\n> I think it's worth having it.\n\nOkay, removed WIP in v57-0004.\n\n> ---\n> * WIP: The paper uses at most 64 for this node kind. \"isset\" happens to fit\n> * inside a single bitmapword on most platforms, so it's a good starting\n> * point. We can make it higher if we need to.\n> */\n> #define RT_FANOUT_48_MAX (RT_NODE_MAX_SLOTS / 4)\n>\n> Are you going to work something on this?\n\nHard-coded 64 for readability, and changed this paragraph to explain\nthe current rationale more clearly:\n\n\"The paper uses at most 64 for this node kind, and one advantage for us\nis that \"isset\" is a single bitmapword on most platforms, rather than\nan array, allowing the compiler to get rid of loops.\"\n\n> ---\n> /* WIP: We could go first to the higher node16 size class */\n> newnode = RT_ALLOC_NODE(tree, RT_NODE_KIND_16, RT_CLASS_16_LO);\n>\n> Does it mean to go to RT_CLASS_16_HI and then further go to\n> RT_CLASS_16_LO upon further deletion?\n\nYes. It wouldn't be much work to make shrinking symmetrical with\ngrowing (a good thing), but it's not essential so I haven't done it\nyet.\n\n> ---\n> * TODO: The current locking mechanism is not optimized for high concurrency\n> * with mixed read-write workloads. In the future it might be worthwhile\n> * to replace it with the Optimistic Lock Coupling or ROWEX mentioned in\n> * the paper \"The ART of Practical Synchronization\" by the same authors as\n> * the ART paper, 2016.\n>\n> I think it's not TODO for now, but a future improvement. 
We can remove it.\n\nIt _is_ a TODO, regardless of when it happens.\n\n> ---\n> /* TODO: consider 5 with subclass 1 or 2. */\n> #define RT_FANOUT_4 4\n>\n> Is there something we need to do here?\n\nChanged to:\n\n\"To save memory in trees with sparse keys, it would make sense to have two\nsize classes for the smallest kind (perhaps a high class of 5 and a low class\nof 2), but it would be more effective to utilize lazy expansion and\npath compression.\"\n\n> ---\n> /*\n> * Return index of the chunk and slot arrays for inserting into the node,\n> * such that the chunk array remains ordered.\n> * TODO: Improve performance for non-SIMD platforms.\n> */\n>\n> Are you going to work on this?\n\nA small step in v57-0010. I've found a way to kill two birds with one\nstone, by first checking for the case that the keys are inserted in\norder. This also helps the SIMD case because it must branch anyway to\navoid bitscanning a zero bitfield. This moves the branch up and turns\na mask into an assert, looking a bit nicer. I've removed the TODO, but\nmaybe we should add it to the search_eq function.\n\n> ---\n> /* Delete the element at 'idx' */\n> /* TODO: replace slow memmove's */\n>\n> Are you going to work on this?\n\nDone in v57-0011.\n\nThe rest:\nv57-0004 - 0008 should be self explanatory, but questions/pushback welcome.\nv57-0009 - I'm thinking leaves don't need to be memset at all. The\nvalue written should be entirely the caller's responsibility, it\nseems.\nv57-0013 - the bench module can be built locally again\nv57-0016 - minor comment edits in tid store\n\nMy todo:\n- benchmark tid store / vacuum again, since we haven't since varlen\ntypes and removing unnecessary locks. 
I'm pretty sure there's an\naccidental memset call that crept in there, but I'm running out of\nsteam today.\n- leftover comment etc work", "msg_date": "Fri, 2 Feb 2024 18:47:02 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Feb 2, 2024 at 8:47 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Jan 31, 2024 at 12:50 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > I've attached the new patch set (v56). I've squashed previous updates\n> > and addressed review comments on v55 in separate patches. Here are the\n> > update summary:\n> >\n> > 0004: fix compiler warning caught by ci test.\n> > 0005-0008: address review comments on radix tree codes.\n> > 0009: cleanup #define and #undef\n> > 0010: use TEST_SHARED_RT macro for shared radix tree test. RT_SHMEM is\n> > undefined after including radixtree.h so we should not use it in test\n> > code.\n>\n> Great, thanks!\n>\n> I have a few questions and comments on v56, then I'll address yours\n> below with the attached v57, which is mostly cosmetic adjustments.\n\nThank you for the comments! I've squashed previous updates and your changes.\n\n>\n> v56-0003:\n>\n> (Looking closer at tests)\n>\n> +static const bool rt_test_stats = false;\n>\n> I'm thinking we should just remove everything that depends on this,\n> and keep this module entirely about correctness.\n\nAgreed. Removed in 0006 patch.\n\n>\n> + for (int shift = 0; shift <= (64 - 8); shift += 8)\n> + test_node_types(shift);\n>\n> I'm not sure what the test_node_types_* functions are testing that\n> test_basic doesn't. They have a different, and confusing, way to stop\n> at every size class and check the keys/values. 
It seems we can replace\n> all that with two more calls (asc/desc) to test_basic, with the\n> maximum level.\n\nAgreed, addressed in 0007 patch.\n\n>\n> It's pretty hard to see what test_pattern() is doing, or why it's\n> useful. I wonder if instead the test could use something like the\n> benchmark where random integers are masked off. That seems simpler. I\n> can work on that, but I'd like to hear your side about test_pattern().\n\nYeah, test_pattern() is originally created for the integerset so it\ndoesn't necessarily fit the radixtree. I agree to use some tests from\nbenchmarks.\n\n>\n> v56-0007:\n>\n> + *\n> + * Since we can rely on DSA_AREA_LOCK to get the total amount of DSA memory,\n> + * the caller doesn't need to take a lock.\n>\n> Maybe something like \"Since dsa_get_total_size() does appropriate locking ...\"?\n\nAgreed. Fixed in 0005 patch.\n\n>\n> v56-0008\n>\n> Thanks, I like how the tests look now.\n>\n> -NOTICE: testing node 4 with height 0 and ascending keys\n> ...\n> +NOTICE: testing node 1 with height 0 and ascending keys\n>\n> Now that the number is not intended to match a size class, \"node X\"\n> seems out of place. 
Maybe we could have a separate array with strings?\n>\n> + 1, /* RT_CLASS_4 */\n>\n> This should be more than one, so that the basic test still exercises\n> paths that shift elements around.\n>\n> + 100, /* RT_CLASS_48 */\n>\n> This node currently holds 64 for local memory.\n>\n> + 255 /* RT_CLASS_256 */\n>\n> This is the only one where we know exactly how many it can take, so\n> may as well keep it at 256.\n\nFixed in 0008 patch.\n\n>\n> v56-0012:\n>\n> The test module for tidstore could use a few more comments.\n\nAddressed in 0012 patch.\n\n>\n> v56-0015:\n>\n> +typedef dsa_pointer TidStoreHandle;\n> +\n>\n> -TidStoreAttach(dsa_area *area, dsa_pointer rt_dp)\n> +TidStoreAttach(dsa_area *area, TidStoreHandle handle)\n> {\n> TidStore *ts;\n> + dsa_pointer rt_dp = handle;\n>\n> My earlier opinion was that \"handle\" was a nicer variable name, but\n> this brings back the typedef and also keeps the variable name I didn't\n> like, but pushes it down into the function. I'm a bit confused, so\n> I've kept these not-squashed for now.\n\nI misunderstood your comment. I've changed to use a variable name\nrt_handle and removed the TidStoreHandle type. 0013 patch.\n\n>\n> -----------------------------------------------------------------------------------\n>\n> Now, for v57:\n>\n> > Looking at overall changes, there are still XXX and TODO comments in\n> > radixtree.h:\n>\n> That's fine, as long as it's intentional as a message to readers. That\n> said, we can get rid of some:\n>\n> > ---\n> > * XXX There are 4 node kinds, and this should never be increased,\n> > * for several reasons:\n> > * 1. With 5 or more kinds, gcc tends to use a jump table for switch\n> > * statements.\n> > * 2. The 4 kinds can be represented with 2 bits, so we have the option\n> > * in the future to tag the node pointer with the kind, even on\n> > * platforms with 32-bit pointers. This might speed up node traversal\n> > * in trees with highly random node kinds.\n> > * 3. 
We can have multiple size classes per node kind.\n> >\n> > Can we just remove \"XXX\"?\n>\n> How about \"NOTE\"?\n\nAgreed.\n\n>\n> > ---\n> > * WIP: notes about traditional radix tree trading off span vs height...\n> >\n> > Are you going to write it?\n>\n> Yes, when I draft a rough commit message, (for next time).\n\nThanks!\n\n>\n> > ---\n> > #ifdef RT_SHMEM\n> > /* WIP: do we really need this? */\n> > typedef dsa_pointer RT_HANDLE;\n> > #endif\n> >\n> > I think it's worth having it.\n>\n> Okay, removed WIP in v57-0004.\n>\n> > ---\n> > * WIP: The paper uses at most 64 for this node kind. \"isset\" happens to fit\n> > * inside a single bitmapword on most platforms, so it's a good starting\n> > * point. We can make it higher if we need to.\n> > */\n> > #define RT_FANOUT_48_MAX (RT_NODE_MAX_SLOTS / 4)\n> >\n> > Are you going to work something on this?\n>\n> Hard-coded 64 for readability, and changed this paragraph to explain\n> the current rationale more clearly:\n>\n> \"The paper uses at most 64 for this node kind, and one advantage for us\n> is that \"isset\" is a single bitmapword on most platforms, rather than\n> an array, allowing the compiler to get rid of loops.\"\n\nLGTM.\n\n>\n> > ---\n> > /* WIP: We could go first to the higher node16 size class */\n> > newnode = RT_ALLOC_NODE(tree, RT_NODE_KIND_16, RT_CLASS_16_LO);\n> >\n> > Does it mean to go to RT_CLASS_16_HI and then further go to\n> > RT_CLASS_16_LO upon further deletion?\n>\n> Yes. It wouldn't be much work to make shrinking symmetrical with\n> growing (a good thing), but it's not essential so I haven't done it\n> yet.\n\nOkay, let's keep it as WIP.\n\n>\n> > ---\n> > * TODO: The current locking mechanism is not optimized for high concurrency\n> > * with mixed read-write workloads. 
In the future it might be worthwhile\n> > * to replace it with the Optimistic Lock Coupling or ROWEX mentioned in\n> > * the paper \"The ART of Practical Synchronization\" by the same authors as\n> > * the ART paper, 2016.\n> >\n> > I think it's not TODO for now, but a future improvement. We can remove it.\n>\n> It _is_ a TODO, regardless of when it happens.\n\nUnderstood.\n\n>\n> > ---\n> > /* TODO: consider 5 with subclass 1 or 2. */\n> > #define RT_FANOUT_4 4\n> >\n> > Is there something we need to do here?\n>\n> Changed to:\n>\n> \"To save memory in trees with sparse keys, it would make sense to have two\n> size classes for the smallest kind (perhaps a high class of 5 and a low class\n> of 2), but it would be more effective to utilize lazy expansion and\n> path compression.\"\n\nLGTM. But there is an extra '*' in the last line:\n\n+ /*\n:\n+ * of 2), but it would be more effective to utilize lazy expansion and\n+ * path compression.\n+ * */\n\nFixed in 0004 patch.\n\n>\n> > ---\n> > /*\n> > * Return index of the chunk and slot arrays for inserting into the node,\n> > * such that the chunk array remains ordered.\n> > * TODO: Improve performance for non-SIMD platforms.\n> > */\n> >\n> > Are you going to work on this?\n>\n> A small step in v57-0010. I've found a way to kill two birds with one\n> stone, by first checking for the case that the keys are inserted in\n> order. This also helps the SIMD case because it must branch anyway to\n> avoid bitscanning a zero bitfield. This moves the branch up and turns\n> a mask into an assert, looking a bit nicer. I've removed the TODO, but\n> maybe we should add it to the search_eq function.\n\nGreat!\n\n>\n> > ---\n> > /* Delete the element at 'idx' */\n> > /* TODO: replace slow memmove's */\n> >\n> > Are you going to work on this?\n>\n> Done in v57-0011.\n\nLGTM.\n\n>\n> The rest:\n> v57-0004 - 0008 should be self explanatory, but questions/pushback welcome.\n> v57-0009 - I'm thinking leaves don't need to be memset at all. 
The\n> value written should be entirely the caller's responsibility, it\n> seems.\n> v57-0013 - the bench module can be built locally again\n> v57-0016 - minor comment edits in tid store\n\nThese fixes look good to me.\n\n>\n> My todo:\n> - benchmark tid store / vacuum again, since we haven't since varlen\n> types and removing unnecessary locks. I'm pretty sure there's an\n> accidental memset call that crept in there, but I'm running out of\n> steam today.\n> - leftover comment etc work\n\nThanks. I'm also going to do some benchmarks and tests.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 6 Feb 2024 11:57:58 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Feb 6, 2024 at 9:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Feb 2, 2024 at 8:47 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > My todo:\n> > - benchmark tid store / vacuum again, since we haven't since varlen\n> > types and removing unnecessary locks.\n\nI ran a vacuum benchmark similar to the one in [1] (unlogged tables\nfor reproducibility), but smaller tables (100 million records),\ndeleting only the last 20% of the table, and including a parallel\nvacuum test. 
Scripts attached.\n\nmonotonically ordered int column index:\n\nmaster:\nsystem usage: CPU: user: 4.27 s, system: 0.41 s, elapsed: 4.70 s\nsystem usage: CPU: user: 4.23 s, system: 0.44 s, elapsed: 4.69 s\nsystem usage: CPU: user: 4.26 s, system: 0.39 s, elapsed: 4.66 s\n\nv-59:\nsystem usage: CPU: user: 3.10 s, system: 0.44 s, elapsed: 3.56 s\nsystem usage: CPU: user: 3.07 s, system: 0.35 s, elapsed: 3.43 s\nsystem usage: CPU: user: 3.07 s, system: 0.36 s, elapsed: 3.44 s\n\nuuid column index:\n\nmaster:\nsystem usage: CPU: user: 18.22 s, system: 1.70 s, elapsed: 20.01 s\nsystem usage: CPU: user: 17.70 s, system: 1.70 s, elapsed: 19.48 s\nsystem usage: CPU: user: 18.48 s, system: 1.59 s, elapsed: 20.43 s\n\nv-59:\nsystem usage: CPU: user: 5.18 s, system: 1.18 s, elapsed: 6.45 s\nsystem usage: CPU: user: 6.56 s, system: 1.39 s, elapsed: 7.99 s\nsystem usage: CPU: user: 6.51 s, system: 1.44 s, elapsed: 8.05 s\n\nint & uuid indexes in parallel:\n\nmaster:\nsystem usage: CPU: user: 4.53 s, system: 1.22 s, elapsed: 20.43 s\nsystem usage: CPU: user: 4.49 s, system: 1.29 s, elapsed: 20.98 s\nsystem usage: CPU: user: 4.46 s, system: 1.33 s, elapsed: 20.50 s\n\nv59:\nsystem usage: CPU: user: 2.09 s, system: 0.32 s, elapsed: 4.86 s\nsystem usage: CPU: user: 3.76 s, system: 0.51 s, elapsed: 8.92 s\nsystem usage: CPU: user: 3.83 s, system: 0.54 s, elapsed: 9.09 s\n\nOver all, I'm pleased with these results, although I'm confused why\nsometimes with the patch the first run reports running faster than the\nothers. I'm curious what others get. Traversing a tree that lives in\nDSA has some overhead, as expected, but still comes out way ahead of\nmaster.\n\nThere are still some micro-benchmarks we could do on tidstore, and\nit'd be good to find out worse-case memory use (1 dead tuple each on\nspread-out pages), but this is decent demonstration.\n\n> > I'm not sure what the test_node_types_* functions are testing that\n> > test_basic doesn't. 
They have a different, and confusing, way to stop\n> > at every size class and check the keys/values. It seems we can replace\n> > all that with two more calls (asc/desc) to test_basic, with the\n> > maximum level.\n\nv58-0008:\n\n+ /* borrowed from RT_MAX_SHIFT */\n+ const int max_shift = (pg_leftmost_one_pos64(UINT64_MAX) /\nBITS_PER_BYTE) * BITS_PER_BYTE;\n\nThis is harder to read than \"64 - 8\", and doesn't really help\nmaintainability either.\nMaybe \"(sizeof(uint64) - 1) * BITS_PER_BYTE\" is a good compromise.\n\n+ /* leaf nodes */\n+ test_basic(test_info, 0);\n\n+ /* internal nodes */\n+ test_basic(test_info, 8);\n+\n+ /* max-level nodes */\n+ test_basic(test_info, max_shift);\n\nThis three-way terminology is not very informative. How about:\n\n+ /* a tree with one level, i.e. a single node under the root node. */\n ...\n+ /* a tree with two levels */\n ...\n+ /* a tree with the maximum number of levels */\n\n+static void\n+test_basic(rt_node_class_test_elem *test_info, int shift)\n+{\n+ elog(NOTICE, \"testing node %s with shift %d\", test_info->class_name, shift);\n+\n+ /* Test nodes while changing the key insertion order */\n+ do_test_basic(test_info->nkeys, shift, false);\n+ do_test_basic(test_info->nkeys, shift, true);\n\nAdding a level of indirection makes this harder to read, and do we\nstill know whether a test failed in asc or desc keys?\n\n> > My earlier opinion was that \"handle\" was a nicer variable name, but\n> > this brings back the typedef and also keeps the variable name I didn't\n> > like, but pushes it down into the function. I'm a bit confused, so\n> > I've kept these not-squashed for now.\n>\n> I misunderstood your comment. I've changed to use a variable name\n> rt_handle and removed the TidStoreHandle type. 
0013 patch.\n\n(diff against an earlier version)\n- pvs->shared->dead_items_handle = TidStoreGetHandle(dead_items);\n+ pvs->shared->dead_items_dp = TidStoreGetHandle(dead_items);\n\nShall we use \"handle\" in vacuum_parallel.c as well?\n\n> > I'm pretty sure there's an\n> > accidental memset call that crept in there, but I'm running out of\n> > steam today.\n\nI have just a little bit of work to add for v59:\n\nv59-0009 - set_offset_bitmap_at() will call memset if it needs to zero\nany bitmapwords. That can only happen if e.g. there is an offset > 128\nand there are none between 64 and 128, so not a huge deal but I think\nit's a bit nicer in this patch.\n\n> > > * WIP: notes about traditional radix tree trading off span vs height...\n> > >\n> > > Are you going to write it?\n> >\n> > Yes, when I draft a rough commit message, (for next time).\n\nI haven't gotten to the commit message, but:\n\nv59-0004 - I did some rewriting of the top header comment to explain\nART concepts for new readers, made small comment changes, and tidied\nup some indentation that pgindent won't touch\nv59-0005 - re-pgindent'ed\n\n\n[1] https://www.postgresql.org/message-id/CAFBsxsHUxmXYy0y4RrhMcNe-R11Bm099Xe-wUdb78pOu0%2BPT2Q%40mail.gmail.com", "msg_date": "Sat, 10 Feb 2024 19:29:10 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sat, Feb 10, 2024 at 9:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Feb 6, 2024 at 9:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Feb 2, 2024 at 8:47 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > My todo:\n> > > - benchmark tid store / vacuum again, since we haven't since varlen\n> > > types and removing unnecessary locks.\n>\n> I ran a vacuum benchmark similar to the one in [1] (unlogged tables\n> for reproducibility), but smaller tables (100 million records),\n> deleting only the 
last 20% of the table, and including a parallel\n> vacuum test. Scripts attached.\n>\n> monotonically ordered int column index:\n>\n> master:\n> system usage: CPU: user: 4.27 s, system: 0.41 s, elapsed: 4.70 s\n> system usage: CPU: user: 4.23 s, system: 0.44 s, elapsed: 4.69 s\n> system usage: CPU: user: 4.26 s, system: 0.39 s, elapsed: 4.66 s\n>\n> v-59:\n> system usage: CPU: user: 3.10 s, system: 0.44 s, elapsed: 3.56 s\n> system usage: CPU: user: 3.07 s, system: 0.35 s, elapsed: 3.43 s\n> system usage: CPU: user: 3.07 s, system: 0.36 s, elapsed: 3.44 s\n>\n> uuid column index:\n>\n> master:\n> system usage: CPU: user: 18.22 s, system: 1.70 s, elapsed: 20.01 s\n> system usage: CPU: user: 17.70 s, system: 1.70 s, elapsed: 19.48 s\n> system usage: CPU: user: 18.48 s, system: 1.59 s, elapsed: 20.43 s\n>\n> v-59:\n> system usage: CPU: user: 5.18 s, system: 1.18 s, elapsed: 6.45 s\n> system usage: CPU: user: 6.56 s, system: 1.39 s, elapsed: 7.99 s\n> system usage: CPU: user: 6.51 s, system: 1.44 s, elapsed: 8.05 s\n>\n> int & uuid indexes in parallel:\n>\n> master:\n> system usage: CPU: user: 4.53 s, system: 1.22 s, elapsed: 20.43 s\n> system usage: CPU: user: 4.49 s, system: 1.29 s, elapsed: 20.98 s\n> system usage: CPU: user: 4.46 s, system: 1.33 s, elapsed: 20.50 s\n>\n> v59:\n> system usage: CPU: user: 2.09 s, system: 0.32 s, elapsed: 4.86 s\n> system usage: CPU: user: 3.76 s, system: 0.51 s, elapsed: 8.92 s\n> system usage: CPU: user: 3.83 s, system: 0.54 s, elapsed: 9.09 s\n>\n> Over all, I'm pleased with these results, although I'm confused why\n> sometimes with the patch the first run reports running faster than the\n> others. I'm curious what others get. Traversing a tree that lives in\n> DSA has some overhead, as expected, but still comes out way ahead of\n> master.\n\nThanks! 
That's a great improvement.\n\nI've also run the same scripts in my environment just in case and got\nsimilar results:\n\nmonotonically ordered int column index:\n\nmaster:\nsystem usage: CPU: user: 14.81 s, system: 0.90 s, elapsed: 15.74 s\nsystem usage: CPU: user: 14.91 s, system: 0.80 s, elapsed: 15.73 s\nsystem usage: CPU: user: 14.85 s, system: 0.70 s, elapsed: 15.57 s\n\nv-59:\nsystem usage: CPU: user: 9.47 s, system: 1.04 s, elapsed: 10.53 s\nsystem usage: CPU: user: 9.67 s, system: 0.81 s, elapsed: 10.50 s\nsystem usage: CPU: user: 9.59 s, system: 0.86 s, elapsed: 10.47 s\n\nuuid column index:\n\nmaster:\nsystem usage: CPU: user: 28.37 s, system: 1.38 s, elapsed: 29.81 s\nsystem usage: CPU: user: 28.05 s, system: 1.37 s, elapsed: 29.47 s\nsystem usage: CPU: user: 28.46 s, system: 1.36 s, elapsed: 29.88 s\n\nv-59:\nsystem usage: CPU: user: 14.87 s, system: 1.13 s, elapsed: 16.02 s\nsystem usage: CPU: user: 14.84 s, system: 1.31 s, elapsed: 16.18 s\nsystem usage: CPU: user: 10.96 s, system: 1.24 s, elapsed: 12.22 s\n\nint & uuid indexes in parallel:\n\nmaster:\nsystem usage: CPU: user: 15.81 s, system: 1.43 s, elapsed: 34.31 s\nsystem usage: CPU: user: 15.84 s, system: 1.41 s, elapsed: 34.34 s\nsystem usage: CPU: user: 15.92 s, system: 1.39 s, elapsed: 34.33 s\n\nv-59:\nsystem usage: CPU: user: 10.93 s, system: 0.92 s, elapsed: 17.59 s\nsystem usage: CPU: user: 10.92 s, system: 1.20 s, elapsed: 17.58 s\nsystem usage: CPU: user: 10.90 s, system: 1.01 s, elapsed: 17.45 s\n\n>\n> There are still some micro-benchmarks we could do on tidstore, and\n> it'd be good to find out worse-case memory use (1 dead tuple each on\n> spread-out pages), but this is decent demonstration.\n\nI've tested a simple case where vacuum removes 33k dead tuples spread\nabout every 10 pages.\n\nmaster:\n198,000 bytes (=33000 * 6)\nsystem usage: CPU: user: 29.49 s, system: 0.88 s, elapsed: 30.40 s\n\nv-59:\n2,834,432 bytes (reported by TidStoreMemoryUsage())\nsystem usage: CPU: user: 15.96 
s, system: 0.89 s, elapsed: 16.88 s\n\n>\n> > > I'm not sure what the test_node_types_* functions are testing that\n> > > test_basic doesn't. They have a different, and confusing, way to stop\n> > > at every size class and check the keys/values. It seems we can replace\n> > > all that with two more calls (asc/desc) to test_basic, with the\n> > > maximum level.\n>\n> v58-0008:\n>\n> + /* borrowed from RT_MAX_SHIFT */\n> + const int max_shift = (pg_leftmost_one_pos64(UINT64_MAX) /\n> BITS_PER_BYTE) * BITS_PER_BYTE;\n>\n> This is harder to read than \"64 - 8\", and doesn't really help\n> maintainability either.\n> Maybe \"(sizeof(uint64) - 1) * BITS_PER_BYTE\" is a good compromise.\n>\n> + /* leaf nodes */\n> + test_basic(test_info, 0);\n>\n> + /* internal nodes */\n> + test_basic(test_info, 8);\n> +\n> + /* max-level nodes */\n> + test_basic(test_info, max_shift);\n>\n> This three-way terminology is not very informative. How about:\n>\n> + /* a tree with one level, i.e. a single node under the root node. */\n> ...\n> + /* a tree with two levels */\n> ...\n> + /* a tree with the maximum number of levels */\n\nAgreed.\n\n>\n> +static void\n> +test_basic(rt_node_class_test_elem *test_info, int shift)\n> +{\n> + elog(NOTICE, \"testing node %s with shift %d\", test_info->class_name, shift);\n> +\n> + /* Test nodes while changing the key insertion order */\n> + do_test_basic(test_info->nkeys, shift, false);\n> + do_test_basic(test_info->nkeys, shift, true);\n>\n> Adding a level of indirection makes this harder to read, and do we\n> still know whether a test failed in asc or desc keys?\n\nAgreed, it seems to be better to keep the previous logging style.\n\n>\n> > > My earlier opinion was that \"handle\" was a nicer variable name, but\n> > > this brings back the typedef and also keeps the variable name I didn't\n> > > like, but pushes it down into the function. I'm a bit confused, so\n> > > I've kept these not-squashed for now.\n> >\n> > I misunderstood your comment. 
I've changed to use a variable name\n> > rt_handle and removed the TidStoreHandle type. 0013 patch.\n>\n> (diff against an earlier version)\n> - pvs->shared->dead_items_handle = TidStoreGetHandle(dead_items);\n> + pvs->shared->dead_items_dp = TidStoreGetHandle(dead_items);\n>\n> Shall we use \"handle\" in vacuum_parallel.c as well?\n\nAgreed.\n\n>\n> > > I'm pretty sure there's an\n> > > accidental memset call that crept in there, but I'm running out of\n> > > steam today.\n>\n> I have just a little bit of work to add for v59:\n>\n> v59-0009 - set_offset_bitmap_at() will call memset if it needs to zero\n> any bitmapwords. That can only happen if e.g. there is an offset > 128\n> and there are none between 64 and 128, so not a huge deal but I think\n> it's a bit nicer in this patch.\n\nLGTM.\n\n>\n> > > > * WIP: notes about traditional radix tree trading off span vs height...\n> > > >\n> > > > Are you going to write it?\n> > >\n> > > Yes, when I draft a rough commit message, (for next time).\n>\n> I haven't gotten to the commit message, but:\n\nI've drafted the commit message.\n\n>\n> v59-0004 - I did some rewriting of the top header comment to explain\n> ART concepts for new readers, made small comment changes, and tidied\n> up some indentation that pgindent won't touch\n> v59-0005 - re-pgindent'ed\n\nLGTM, squashed all changes.\n\nI've attached these updates from v59 in separate patches.\n\nI've run regression tests with valgrind and run the coverity scan, and\nI don't see critical issues.\n\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 15 Feb 2024 12:20:46 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Feb 15, 2024 at 10:21 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Sat, Feb 10, 2024 at 9:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> I've also run the 
same scripts in my environment just in case and got\n> similar results:\n\nThanks for testing, looks good as well.\n\n> > There are still some micro-benchmarks we could do on tidstore, and\n> > it'd be good to find out worse-case memory use (1 dead tuple each on\n> > spread-out pages), but this is decent demonstration.\n>\n> I've tested a simple case where vacuum removes 33k dead tuples spread\n> about every 10 pages.\n>\n> master:\n> 198,000 bytes (=33000 * 6)\n> system usage: CPU: user: 29.49 s, system: 0.88 s, elapsed: 30.40 s\n>\n> v-59:\n> 2,834,432 bytes (reported by TidStoreMemoryUsage())\n> system usage: CPU: user: 15.96 s, system: 0.89 s, elapsed: 16.88 s\n\nThe memory usage for the sparse case may be a concern, although it's\nnot bad -- a multiple of something small is probably not huge in\npractice. See below for an option we have for this.\n\n> > > > I'm pretty sure there's an\n> > > > accidental memset call that crept in there, but I'm running out of\n> > > > steam today.\n> >\n> > I have just a little bit of work to add for v59:\n> >\n> > v59-0009 - set_offset_bitmap_at() will call memset if it needs to zero\n> > any bitmapwords. That can only happen if e.g. there is an offset > 128\n> > and there are none between 64 and 128, so not a huge deal but I think\n> > it's a bit nicer in this patch.\n>\n> LGTM.\n\nOkay, I've squashed this.\n\n> I've drafted the commit message.\n\nThanks, this is a good start.\n\n> I've run regression tests with valgrind and run the coverity scan, and\n> I don't see critical issues.\n\nGreat!\n\nNow, I think we're in pretty good shape. There are a couple of things\nthat might be objectionable, so I want to try to improve them in the\nlittle time we have:\n\n1. Memory use for the sparse case. I shared an idea a few months ago\nof how runtime-embeddable values (true combined pointer-value slots)\ncould work for tids. 
I don't think this is a must-have, but it's not a\nlot of code, and I have this working:\n\nv61-0006: Preparatory refactoring -- I think we should do this anyway,\nsince the intent seems more clear to me.\nv61-0007: Runtime-embeddable tids -- Optional for v17, but should\nreduce memory regressions, so should be considered. Up to 3 tids can\nbe stored in the last level child pointer. It's not polished, but I'll\nonly proceed with that if we think we need this. \"flags\" iis called\nthat because it could hold tidbitmap.c booleans (recheck, lossy) in\nthe future, in addition to reserving space for the pointer tag. Note:\nI hacked the tests to only have 2 offsets per block to demo, but of\ncourse both paths should be tested.\n\n2. Management of memory contexts. It's pretty verbose and messy. I\nthink the abstraction could be better:\nA: tidstore currently passes CurrentMemoryContext to RT_CREATE, so we\ncan't destroy or reset it. That means we have to do a lot of manual\nwork.\nB: Passing \"max_bytes\" to the radix tree was my idea, I believe, but\nit seems the wrong responsibility. Not all uses will have a\nwork_mem-type limit, I'm guessing. We only use it for limiting the max\nblock size, and aset's default 8MB is already plenty small for\nvacuum's large limit anyway. tidbitmap.c's limit is work_mem, so\nsmaller, and there it makes sense to limit the max blocksize this way.\nC: The context for values has complex #ifdefs based on the value\nlength/varlen, but it's both too much and not enough. If we get a bump\ncontext, how would we shoehorn that in for values for vacuum but not\nfor tidbitmap?\n\nHere's an idea: Have vacuum (or tidbitmap etc.) pass a context to\nTidStoreCreate(), and then to RT_CREATE. That context will contain the\nvalues (for local mem), and the node slabs will be children of the\nvalue context. 
That way, measuring memory usage and free-ing can just\ncall with this parent context, and let recursion handle the rest.\nPerhaps the passed context can also hold the radix-tree struct, but\nI'm not sure since I haven't tried it. What do you think?\n\nWith this resolved, I think the radix tree is pretty close to\ncommittable. The tid store will likely need some polish yet, but no\nmajor issues I know of.\n\n(And, finally, a small thing I that I wanted to share just so I don't\nforget, but maybe not worth the attention: In Andres's prototype,\nthere is a comment wondering if an update can skip checking if it\nfirst need to create a root node. This is pretty easy, and done in\nv61-0008.)", "msg_date": "Thu, 15 Feb 2024 18:26:13 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "v61 had a brown-paper-bag bug in the embedded tids patch that didn't\npresent in the tidstore test, but caused vacuum to fail, fixed in v62.", "msg_date": "Fri, 16 Feb 2024 07:51:39 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Feb 15, 2024 at 8:26 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Feb 15, 2024 at 10:21 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Sat, Feb 10, 2024 at 9:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > I've also run the same scripts in my environment just in case and got\n> > similar results:\n>\n> Thanks for testing, looks good as well.\n>\n> > > There are still some micro-benchmarks we could do on tidstore, and\n> > > it'd be good to find out worse-case memory use (1 dead tuple each on\n> > > spread-out pages), but this is decent demonstration.\n> >\n> > I've tested a simple case where vacuum removes 33k dead tuples spread\n> > about every 10 pages.\n> >\n> > 
master:\n> > 198,000 bytes (=33000 * 6)\n> > system usage: CPU: user: 29.49 s, system: 0.88 s, elapsed: 30.40 s\n> >\n> > v-59:\n> > 2,834,432 bytes (reported by TidStoreMemoryUsage())\n> > system usage: CPU: user: 15.96 s, system: 0.89 s, elapsed: 16.88 s\n>\n> The memory usage for the sparse case may be a concern, although it's\n> not bad -- a multiple of something small is probably not huge in\n> practice. See below for an option we have for this.\n>\n> > > > > I'm pretty sure there's an\n> > > > > accidental memset call that crept in there, but I'm running out of\n> > > > > steam today.\n> > >\n> > > I have just a little bit of work to add for v59:\n> > >\n> > > v59-0009 - set_offset_bitmap_at() will call memset if it needs to zero\n> > > any bitmapwords. That can only happen if e.g. there is an offset > 128\n> > > and there are none between 64 and 128, so not a huge deal but I think\n> > > it's a bit nicer in this patch.\n> >\n> > LGTM.\n>\n> Okay, I've squashed this.\n>\n> > I've drafted the commit message.\n>\n> Thanks, this is a good start.\n>\n> > I've run regression tests with valgrind and run the coverity scan, and\n> > I don't see critical issues.\n>\n> Great!\n>\n> Now, I think we're in pretty good shape. There are a couple of things\n> that might be objectionable, so I want to try to improve them in the\n> little time we have:\n>\n> 1. Memory use for the sparse case. I shared an idea a few months ago\n> of how runtime-embeddable values (true combined pointer-value slots)\n> could work for tids. I don't think this is a must-have, but it's not a\n> lot of code, and I have this working:\n>\n> v61-0006: Preparatory refactoring -- I think we should do this anyway,\n> since the intent seems more clear to me.\n\nLooks good refactoring to me.\n\n> v61-0007: Runtime-embeddable tids -- Optional for v17, but should\n> reduce memory regressions, so should be considered. Up to 3 tids can\n> be stored in the last level child pointer. 
It's not polished, but I'll\n> only proceed with that if we think we need this. \"flags\" iis called\n> that because it could hold tidbitmap.c booleans (recheck, lossy) in\n> the future, in addition to reserving space for the pointer tag. Note:\n> I hacked the tests to only have 2 offsets per block to demo, but of\n> course both paths should be tested.\n\nInteresting. I've run the same benchmark tests we did[1][2] (the\nmedian of 3 runs):\n\nmonotonically ordered int column index:\n\nmaster: system usage: CPU: user: 14.91 s, system: 0.80 s, elapsed: 15.73 s\nv-59: system usage: CPU: user: 9.67 s, system: 0.81 s, elapsed: 10.50 s\nv-62: system usage: CPU: user: 1.94 s, system: 0.69 s, elapsed: 2.64 s\n\nuuid column index:\n\nmaster: system usage: CPU: user: 28.37 s, system: 1.38 s, elapsed: 29.81 s\nv-59: system usage: CPU: user: 14.84 s, system: 1.31 s, elapsed: 16.18 s\nv-62: system usage: CPU: user: 4.06 s, system: 0.98 s, elapsed: 5.06 s\n\nint & uuid indexes in parallel:\n\nmaster: system usage: CPU: user: 15.92 s, system: 1.39 s, elapsed: 34.33 s\nv-59: system usage: CPU: user: 10.92 s, system: 1.20 s, elapsed: 17.58 s\nv-62: system usage: CPU: user: 2.54 s, system: 0.94 s, elapsed: 6.00 s\n\nsparse case:\n\nmaster:\n198,000 bytes (=33000 * 6)\nsystem usage: CPU: user: 29.49 s, system: 0.88 s, elapsed: 30.40 s\n\nv-59:\n2,834,432 bytes (reported by TidStoreMemoryUsage())\nsystem usage: CPU: user: 15.96 s, system: 0.89 s, elapsed: 16.88 s\n\nv-62:\n729,088 bytes (reported by TidStoreMemoryUsage())\nsystem usage: CPU: user: 4.63 s, system: 0.86 s, elapsed: 5.50 s\n\nI'm happy to see a huge improvement. While it's really fascinating to\nme, I'm concerned about the time left until the feature freeze. We\nneed to polish both tidstore and vacuum integration patches in 5\nweeks. Personally I'd like to have it as a separate patch for now, and\nfocus on completing the main three patches since we might face some\nissues after pushing these patches. 
I think with 0007 patch it's a big\nwin but it's still a win even without 0007 patch.\n\n>\n> 2. Management of memory contexts. It's pretty verbose and messy. I\n> think the abstraction could be better:\n> A: tidstore currently passes CurrentMemoryContext to RT_CREATE, so we\n> can't destroy or reset it. That means we have to do a lot of manual\n> work.\n> B: Passing \"max_bytes\" to the radix tree was my idea, I believe, but\n> it seems the wrong responsibility. Not all uses will have a\n> work_mem-type limit, I'm guessing. We only use it for limiting the max\n> block size, and aset's default 8MB is already plenty small for\n> vacuum's large limit anyway. tidbitmap.c's limit is work_mem, so\n> smaller, and there it makes sense to limit the max blocksize this way.\n> C: The context for values has complex #ifdefs based on the value\n> length/varlen, but it's both too much and not enough. If we get a bump\n> context, how would we shoehorn that in for values for vacuum but not\n> for tidbitmap?\n>\n> Here's an idea: Have vacuum (or tidbitmap etc.) pass a context to\n> TidStoreCreate(), and then to RT_CREATE. That context will contain the\n> values (for local mem), and the node slabs will be children of the\n> value context. That way, measuring memory usage and free-ing can just\n> call with this parent context, and let recursion handle the rest.\n> Perhaps the passed context can also hold the radix-tree struct, but\n> I'm not sure since I haven't tried it. What do you think?\n\nIf I understand your idea correctly, RT_CREATE() creates the context\nfor values as a child of the passed context and the node slabs as\nchildren of the value context. That way, measuring memory usage can\njust call with the value context. It sounds like a good idea. But it\nwas not clear to me how to address point B and C.\n\nAnother variant of this idea would be that RT_CREATE() creates the\nparent context of the value context to store radix-tree struct. 
That\nis, the hierarchy would be like:\n\nA MemoryContext (passed by vacuum through tidstore)\n - radix tree memory context (store radx-tree struct, control\nstruct, and iterator)\n - value context (aset, slab, or bump)\n - node slab contexts\n\nFreeing can just call with the radix tree memory context. And perhaps\nit works even if tidstore passes CurrentMemoryContex to RT_CREATE()?\n\n>\n> With this resolved, I think the radix tree is pretty close to\n> committable. The tid store will likely need some polish yet, but no\n> major issues I know of.\n\nAgreed.\n\n>\n> (And, finally, a small thing I that I wanted to share just so I don't\n> forget, but maybe not worth the attention: In Andres's prototype,\n> there is a comment wondering if an update can skip checking if it\n> first need to create a root node. This is pretty easy, and done in\n> v61-0008.)\n\nLGTM, thanks!\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 16 Feb 2024 12:04:38 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Feb 16, 2024 at 10:05 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > v61-0007: Runtime-embeddable tids -- Optional for v17, but should\n> > reduce memory regressions, so should be considered. Up to 3 tids can\n> > be stored in the last level child pointer. It's not polished, but I'll\n> > only proceed with that if we think we need this. \"flags\" iis called\n> > that because it could hold tidbitmap.c booleans (recheck, lossy) in\n> > the future, in addition to reserving space for the pointer tag. Note:\n> > I hacked the tests to only have 2 offsets per block to demo, but of\n> > course both paths should be tested.\n>\n> Interesting. 
I've run the same benchmark tests we did[1][2] (the\n> median of 3 runs):\n>\n> monotonically ordered int column index:\n>\n> master: system usage: CPU: user: 14.91 s, system: 0.80 s, elapsed: 15.73 s\n> v-59: system usage: CPU: user: 9.67 s, system: 0.81 s, elapsed: 10.50 s\n> v-62: system usage: CPU: user: 1.94 s, system: 0.69 s, elapsed: 2.64 s\n\nHmm, that's strange -- this test is intended to delete all records\nfrom the last 20% of the blocks, so I wouldn't expect any improvement\nhere, only in the sparse case. Maybe something is wrong. All the more\nreason to put it off...\n\n> I'm happy to see a huge improvement. While it's really fascinating to\n> me, I'm concerned about the time left until the feature freeze. We\n> need to polish both tidstore and vacuum integration patches in 5\n> weeks. Personally I'd like to have it as a separate patch for now, and\n> focus on completing the main three patches since we might face some\n> issues after pushing these patches. I think with 0007 patch it's a big\n> win but it's still a win even without 0007 patch.\n\nAgreed to not consider it for initial commit. I'll hold on to it for\nsome future time.\n\n> > 2. Management of memory contexts. It's pretty verbose and messy. I\n> > think the abstraction could be better:\n> > A: tidstore currently passes CurrentMemoryContext to RT_CREATE, so we\n> > can't destroy or reset it. That means we have to do a lot of manual\n> > work.\n> > B: Passing \"max_bytes\" to the radix tree was my idea, I believe, but\n> > it seems the wrong responsibility. Not all uses will have a\n> > work_mem-type limit, I'm guessing. We only use it for limiting the max\n> > block size, and aset's default 8MB is already plenty small for\n> > vacuum's large limit anyway. tidbitmap.c's limit is work_mem, so\n> > smaller, and there it makes sense to limit the max blocksize this way.\n> > C: The context for values has complex #ifdefs based on the value\n> > length/varlen, but it's both too much and not enough. 
If we get a bump\n> > context, how would we shoehorn that in for values for vacuum but not\n> > for tidbitmap?\n> >\n> > Here's an idea: Have vacuum (or tidbitmap etc.) pass a context to\n> > TidStoreCreate(), and then to RT_CREATE. That context will contain the\n> > values (for local mem), and the node slabs will be children of the\n> > value context. That way, measuring memory usage and free-ing can just\n> > call with this parent context, and let recursion handle the rest.\n> > Perhaps the passed context can also hold the radix-tree struct, but\n> > I'm not sure since I haven't tried it. What do you think?\n>\n> If I understand your idea correctly, RT_CREATE() creates the context\n> for values as a child of the passed context and the node slabs as\n> children of the value context. That way, measuring memory usage can\n> just call with the value context. It sounds like a good idea. But it\n> was not clear to me how to address point B and C.\n\nFor B & C, vacuum would create a context to pass to TidStoreCreate,\nand it wouldn't need to bother changing max block size. RT_CREATE\nwould use that directly for leaves (if any), and would only create\nchild slab contexts under it. It would not need to know about\nmax_bytes. Modifyng your diagram a bit, something like:\n\n- caller-supplied radix tree memory context (the 3 structs -- and\nleaves, if any) (aset (or future bump?))\n - node slab contexts\n\nThis might only be workable with aset, if we need to individually free\nthe structs. (I haven't studied this, it was a recent idea)\nIt's simpler, because with small fixed length values, we don't need to\ndetect that and avoid creating a leaf context. All leaves would live\nin the same context as the structs.\n\n> Another variant of this idea would be that RT_CREATE() creates the\n> parent context of the value context to store radix-tree struct. 
That\n> is, the hierarchy would be like:\n>\n> A MemoryContext (passed by vacuum through tidstore)\n> - radix tree memory context (store radx-tree struct, control\n> struct, and iterator)\n> - value context (aset, slab, or bump)\n> - node slab contexts\n\nThe template handling the value context here is complex, and is what I\nmeant by 'C' above. Most fixed length allocations in all of the\nbackend are aset, so it seems fine to use it always.\n\n> Freeing can just call with the radix tree memory context. And perhaps\n> it works even if tidstore passes CurrentMemoryContex to RT_CREATE()?\n\nSeems like it would, but would keep some complexity, as I mentioned.\n\n\n", "msg_date": "Fri, 16 Feb 2024 10:41:13 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Feb 16, 2024 at 12:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Feb 16, 2024 at 10:05 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > v61-0007: Runtime-embeddable tids -- Optional for v17, but should\n> > > reduce memory regressions, so should be considered. Up to 3 tids can\n> > > be stored in the last level child pointer. It's not polished, but I'll\n> > > only proceed with that if we think we need this. \"flags\" iis called\n> > > that because it could hold tidbitmap.c booleans (recheck, lossy) in\n> > > the future, in addition to reserving space for the pointer tag. Note:\n> > > I hacked the tests to only have 2 offsets per block to demo, but of\n> > > course both paths should be tested.\n> >\n> > Interesting. 
I've run the same benchmark tests we did[1][2] (the\n> > median of 3 runs):\n> >\n> > monotonically ordered int column index:\n> >\n> > master: system usage: CPU: user: 14.91 s, system: 0.80 s, elapsed: 15.73 s\n> > v-59: system usage: CPU: user: 9.67 s, system: 0.81 s, elapsed: 10.50 s\n> > v-62: system usage: CPU: user: 1.94 s, system: 0.69 s, elapsed: 2.64 s\n>\n> Hmm, that's strange -- this test is intended to delete all records\n> from the last 20% of the blocks, so I wouldn't expect any improvement\n> here, only in the sparse case. Maybe something is wrong. All the more\n> reason to put it off...\n\nOkay, let's dig it deeper later.\n\n>\n> > I'm happy to see a huge improvement. While it's really fascinating to\n> > me, I'm concerned about the time left until the feature freeze. We\n> > need to polish both tidstore and vacuum integration patches in 5\n> > weeks. Personally I'd like to have it as a separate patch for now, and\n> > focus on completing the main three patches since we might face some\n> > issues after pushing these patches. I think with 0007 patch it's a big\n> > win but it's still a win even without 0007 patch.\n>\n> Agreed to not consider it for initial commit. I'll hold on to it for\n> some future time.\n>\n> > > 2. Management of memory contexts. It's pretty verbose and messy. I\n> > > think the abstraction could be better:\n> > > A: tidstore currently passes CurrentMemoryContext to RT_CREATE, so we\n> > > can't destroy or reset it. That means we have to do a lot of manual\n> > > work.\n> > > B: Passing \"max_bytes\" to the radix tree was my idea, I believe, but\n> > > it seems the wrong responsibility. Not all uses will have a\n> > > work_mem-type limit, I'm guessing. We only use it for limiting the max\n> > > block size, and aset's default 8MB is already plenty small for\n> > > vacuum's large limit anyway. 
tidbitmap.c's limit is work_mem, so\n> > > smaller, and there it makes sense to limit the max blocksize this way.\n> > > C: The context for values has complex #ifdefs based on the value\n> > > length/varlen, but it's both too much and not enough. If we get a bump\n> > > context, how would we shoehorn that in for values for vacuum but not\n> > > for tidbitmap?\n> > >\n> > > Here's an idea: Have vacuum (or tidbitmap etc.) pass a context to\n> > > TidStoreCreate(), and then to RT_CREATE. That context will contain the\n> > > values (for local mem), and the node slabs will be children of the\n> > > value context. That way, measuring memory usage and free-ing can just\n> > > call with this parent context, and let recursion handle the rest.\n> > > Perhaps the passed context can also hold the radix-tree struct, but\n> > > I'm not sure since I haven't tried it. What do you think?\n> >\n> > If I understand your idea correctly, RT_CREATE() creates the context\n> > for values as a child of the passed context and the node slabs as\n> > children of the value context. That way, measuring memory usage can\n> > just call with the value context. It sounds like a good idea. But it\n> > was not clear to me how to address point B and C.\n>\n> For B & C, vacuum would create a context to pass to TidStoreCreate,\n> and it wouldn't need to bother changing max block size. RT_CREATE\n> would use that directly for leaves (if any), and would only create\n> child slab contexts under it. It would not need to know about\n> max_bytes. Modifyng your diagram a bit, something like:\n>\n> - caller-supplied radix tree memory context (the 3 structs -- and\n> leaves, if any) (aset (or future bump?))\n> - node slab contexts\n>\n> This might only be workable with aset, if we need to individually free\n> the structs. (I haven't studied this, it was a recent idea)\n> It's simpler, because with small fixed length values, we don't need to\n> detect that and avoid creating a leaf context. 
All leaves would live\n> in the same context as the structs.\n\nThank you for the explanation.\n\nI think that vacuum and tidbitmap (and future users) would end up\nhaving the same max block size calculation. And it seems slightly odd\nlayering to me that max-block-size-specified context is created on\nvacuum (or tidbitmap) layer, a varlen-value radix tree is created by\ntidstore layer, and the passed context is used for leaves (if\nvarlen-value is used) on radix tree layer. Another idea is to create a\nmax-block-size-specified context on the tidstore layer. That is,\nvacuum and tidbitmap pass a work_mem and a flag indicating whether the\ntidstore can use the bump context, and tidstore creates a (aset of\nbump) memory context with the calculated max block size and passes it\nto the radix tree.\n\nAs for using the bump memory context, I feel that we need to store\niterator struct in aset context at least as it can be individually\nfreed and re-created. Or it might not be necessary to allocate the\niterator struct in the same context as radix tree.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 19 Feb 2024 11:01:45 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Feb 19, 2024 at 9:02 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I think that vacuum and tidbitmap (and future users) would end up\n> having the same max block size calculation. 
And it seems slightly odd\n> layering to me that max-block-size-specified context is created on\n> vacuum (or tidbitmap) layer, a varlen-value radix tree is created by\n> tidstore layer, and the passed context is used for leaves (if\n> varlen-value is used) on radix tree layer.\n\nThat sounds slightly more complicated than I was thinking of, but we\ncould actually be talking about the same thing: I'm drawing a\ndistinction between \"used = must be detected / #ifdef'd\" and \"used =\nactually happens to call allocation\". I meant that the passed context\nwould _always_ be used for leaves, regardless of varlen or not. So\nwith fixed-length values short enough to live in child pointer slots,\nthat context would still be used for iteration etc.\n\n> Another idea is to create a\n> max-block-size-specified context on the tidstore layer. That is,\n> vacuum and tidbitmap pass a work_mem and a flag indicating whether the\n> tidstore can use the bump context, and tidstore creates a (aset of\n> bump) memory context with the calculated max block size and passes it\n> to the radix tree.\n\nThat might be a better abstraction since both uses have some memory limit.\n\n> As for using the bump memory context, I feel that we need to store\n> iterator struct in aset context at least as it can be individually\n> freed and re-created. Or it might not be necessary to allocate the\n> iterator struct in the same context as radix tree.\n\nOkay, that's one thing I was concerned about. Since we don't actually\nhave a bump context yet, it seems simple to assume aset for non-nodes,\nand if we do get it, we can adjust slightly. 
Anyway, this seems like a\ngood thing to try to clean up, but it's also not a show-stopper.\n\nOn that note: I will be going on honeymoon shortly, and then to PGConf\nIndia, so I will have sporadic connectivity for the next 10 days and\nwon't be doing any hacking during that time.\n\nAndres, did you want to take a look at the radix tree patch 0003?\nAside from the above possible cleanup, most of it should be stable.\n\n\n", "msg_date": "Mon, 19 Feb 2024 17:47:15 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Feb 19, 2024 at 7:47 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Feb 19, 2024 at 9:02 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I think that vacuum and tidbitmap (and future users) would end up\n> > having the same max block size calculation. And it seems slightly odd\n> > layering to me that max-block-size-specified context is created on\n> > vacuum (or tidbitmap) layer, a varlen-value radix tree is created by\n> > tidstore layer, and the passed context is used for leaves (if\n> > varlen-value is used) on radix tree layer.\n>\n> That sounds slightly more complicated than I was thinking of, but we\n> could actually be talking about the same thing: I'm drawing a\n> distinction between \"used = must be detected / #ifdef'd\" and \"used =\n> actually happens to call allocation\". I meant that the passed context\n> would _always_ be used for leaves, regardless of varlen or not. So\n> with fixed-length values short enough to live in child pointer slots,\n> that context would still be used for iteration etc.\n>\n> > Another idea is to create a\n> > max-block-size-specified context on the tidstore layer. 
That is,\n> > vacuum and tidbitmap pass a work_mem and a flag indicating whether the\n> > tidstore can use the bump context, and tidstore creates a (aset of\n> > bump) memory context with the calculated max block size and passes it\n> > to the radix tree.\n>\n> That might be a better abstraction since both uses have some memory limit.\n\nI've drafted this idea, and fixed a bug in tidstore.c. Here is the\nsummary of updates from v62:\n\n- removed v62-0007 patch as we discussed\n- squashed v62-0006 and v62-0008 patches into 0003 patch\n- v63-0008 patch fixes a bug in tidstore.\n- v63-0009 patch is a draft idea of cleanup memory context handling.\n\n>\n> > As for using the bump memory context, I feel that we need to store\n> > iterator struct in aset context at least as it can be individually\n> > freed and re-created. Or it might not be necessary to allocate the\n> > iterator struct in the same context as radix tree.\n>\n> Okay, that's one thing I was concerned about. Since we don't actually\n> have a bump context yet, it seems simple to assume aset for non-nodes,\n> and if we do get it, we can adjust slightly. Anyway, this seems like a\n> good thing to try to clean up, but it's also not a show-stopper.\n>\n> On that note: I will be going on honeymoon shortly, and then to PGConf\n> India, so I will have sporadic connectivity for the next 10 days and\n> won't be doing any hacking during that time.\n\nThank you for letting us know. 
Enjoy yourself!\n\nRegards\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 20 Feb 2024 15:59:04 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Feb 20, 2024 at 1:59 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> - v63-0008 patch fixes a bug in tidstore.\n\n- page->nwords = wordnum + 1;\n- Assert(page->nwords = WORDS_PER_PAGE(offsets[num_offsets - 1]));\n+ page->nwords = wordnum;\n+ Assert(page->nwords == WORDS_PER_PAGE(offsets[num_offsets - 1]));\n\nYikes, I'm guessing this failed in a non-assert builds? I wonder why\nmy compiler didn't yell at me... Have you tried a tidstore-debug build\nwithout asserts?\n\n> - v63-0009 patch is a draft idea of cleanup memory context handling.\n\nThanks, looks pretty good!\n\n+ ts->rt_context = AllocSetContextCreate(CurrentMemoryContext,\n+ \"tidstore storage\",\n\n\"tidstore storage\" sounds a bit strange -- maybe look at some other\ncontext names for ideas.\n\n- leaf.alloc = (RT_PTR_ALLOC) MemoryContextAlloc(tree->leaf_ctx, allocsize);\n+ leaf.alloc = (RT_PTR_ALLOC) MemoryContextAlloc(tree->leaf_ctx != NULL\n+ ? tree->leaf_ctx\n+ : tree->context, allocsize);\n\nInstead of branching here, can we copy \"context\" to \"leaf_ctx\" when\nnecessary (those names should look more like eachother, btw)? I think\nthat means anything not covered by this case:\n\n+#ifndef RT_VARLEN_VALUE_SIZE\n+ if (sizeof(RT_VALUE_TYPE) > sizeof(RT_PTR_ALLOC))\n+ tree->leaf_ctx = SlabContextCreate(ctx,\n+ RT_STR(RT_PREFIX) \"radix_tree leaf contex\",\n+ RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n+ sizeof(RT_VALUE_TYPE));\n+#endif /* !RT_VARLEN_VALUE_SIZE */\n\n...also, we should document why we're using slab here. On that, I\ndon't recall why we are? We've never had a fixed-length type test case\non 64-bit, so it wasn't because it won through benchmarking. 
It seems\na hold-over from the days of \"multi-value leaves\". Is it to avoid the\npossibility of space wastage with non-power-of-two size types?\n\nFor this stanza that remains unchanged:\n\nfor (int i = 0; i < RT_SIZE_CLASS_COUNT; i++)\n{\n MemoryContextDelete(tree->node_slabs[i]);\n}\n\nif (tree->leaf_ctx)\n{\n MemoryContextDelete(tree->leaf_ctx);\n}\n\n...is there a reason we can't just delete tree->ctx, and let that\nrecursively delete child contexts?\n\nSecondly, I thought about my recent work to skip checking if we first\nneed to create a root node, and that has a harmless (for vacuum at\nleast) but slightly untidy behavior: When RT_SET is first called, and\nthe key is bigger than 255, new nodes will go on top of the root node.\nThese have chunk '0'. If all subsequent keys are big enough, the\norginal root node will stay empty. If all keys are deleted, there will\nbe a chain of empty nodes remaining. Again, I believe this is\nharmless, but to make tidy, it should easy to teach RT_EXTEND_UP to\ncall out to RT_EXTEND_DOWN if it finds the tree is empty. I can work\non this, but likely not today.\n\nThirdly, cosmetic: With the introduction of single-value leaves, it\nseems we should do s/RT_NODE_PTR/RT_CHILD_PTR/ -- what do you think?\n\n\n", "msg_date": "Thu, 29 Feb 2024 18:43:39 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I'm looking at RT_FREE_RECURSE again (only used for DSA memory), and\nI'm not convinced it's freeing all the memory. It's been many months\nsince we discussed this last, but IIRC we cannot just tell DSA to free\nall its segments, right? 
Is there currently anything preventing us\nfrom destroying the whole DSA area at once?\n\n+ /* The last level node has pointers to values */\n+ if (shift == 0)\n+ {\n+ dsa_free(tree->dsa, ptr);\n+ return;\n+ }\n\nIIUC, this doesn't actually free leaves, it only frees the last-level\nnode. And, this function is unaware of whether children could be\nembedded values. I'm thinking we need to get rid of the above\npre-check and instead, each node kind to have something like (e.g.\nnode4):\n\nRT_PTR_ALLOC child = n4->children[i];\n\nif (shift > 0)\n RT_FREE_RECURSE(tree, child, shift - RT_SPAN);\nelse if (!RT_CHILDPTR_IS_VALUE(child))\n dsa_free(tree->dsa, child);\n\n...or am I missing something?\n\n\n", "msg_date": "Fri, 1 Mar 2024 11:15:05 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I wrote:\n\n> Secondly, I thought about my recent work to skip checking if we first\n> need to create a root node, and that has a harmless (for vacuum at\n> least) but slightly untidy behavior: When RT_SET is first called, and\n> the key is bigger than 255, new nodes will go on top of the root node.\n> These have chunk '0'. If all subsequent keys are big enough, the\n> orginal root node will stay empty. If all keys are deleted, there will\n> be a chain of empty nodes remaining. Again, I believe this is\n> harmless, but to make tidy, it should easy to teach RT_EXTEND_UP to\n> call out to RT_EXTEND_DOWN if it finds the tree is empty. I can work\n> on this, but likely not today.\n\nThis turns out to be a lot trickier than it looked, so it seems best\nto allow a trivial amount of waste, as long as it's documented\nsomewhere. It also wouldn't be terrible to re-add those branches,\nsince they're highly predictable.\n\nI just noticed there are a lot of unused function parameters\n(referring to parent slots) leftover from a few weeks ago. Those are\nremoved in v64-0009. 
0010 makes the obvious name change in those\nremaining to \"parent_slot\". 0011 is a simplification in two places\nregarding reserving slots. This should be a bit easier to read and\npossibly makes it easier on the compiler.", "msg_date": "Fri, 1 Mar 2024 13:58:07 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Feb 29, 2024 at 8:43 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Feb 20, 2024 at 1:59 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > - v63-0008 patch fixes a bug in tidstore.\n>\n> - page->nwords = wordnum + 1;\n> - Assert(page->nwords = WORDS_PER_PAGE(offsets[num_offsets - 1]));\n> + page->nwords = wordnum;\n> + Assert(page->nwords == WORDS_PER_PAGE(offsets[num_offsets - 1]));\n>\n> Yikes, I'm guessing this failed in a non-assert builds? I wonder why\n> my compiler didn't yell at me... Have you tried a tidstore-debug build\n> without asserts?\n\nYes. I didn't get any failures.\n\n>\n> > - v63-0009 patch is a draft idea of cleanup memory context handling.\n>\n> Thanks, looks pretty good!\n>\n> + ts->rt_context = AllocSetContextCreate(CurrentMemoryContext,\n> + \"tidstore storage\",\n>\n> \"tidstore storage\" sounds a bit strange -- maybe look at some other\n> context names for ideas.\n\nAgreed. How about \"tidstore's radix tree\"?\n\n>\n> - leaf.alloc = (RT_PTR_ALLOC) MemoryContextAlloc(tree->leaf_ctx, allocsize);\n> + leaf.alloc = (RT_PTR_ALLOC) MemoryContextAlloc(tree->leaf_ctx != NULL\n> + ? tree->leaf_ctx\n> + : tree->context, allocsize);\n>\n> Instead of branching here, can we copy \"context\" to \"leaf_ctx\" when\n> necessary (those names should look more like eachother, btw)? 
I think\n> that means anything not covered by this case:\n>\n> +#ifndef RT_VARLEN_VALUE_SIZE\n> + if (sizeof(RT_VALUE_TYPE) > sizeof(RT_PTR_ALLOC))\n> + tree->leaf_ctx = SlabContextCreate(ctx,\n> + RT_STR(RT_PREFIX) \"radix_tree leaf contex\",\n> + RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n> + sizeof(RT_VALUE_TYPE));\n> +#endif /* !RT_VARLEN_VALUE_SIZE */\n>\n> ...also, we should document why we're using slab here. On that, I\n> don't recall why we are? We've never had a fixed-length type test case\n> on 64-bit, so it wasn't because it won through benchmarking. It seems\n> a hold-over from the days of \"multi-value leaves\". Is it to avoid the\n> possibility of space wastage with non-power-of-two size types?\n\nYes, it matches my understanding.\n\n>\n> For this stanza that remains unchanged:\n>\n> for (int i = 0; i < RT_SIZE_CLASS_COUNT; i++)\n> {\n> MemoryContextDelete(tree->node_slabs[i]);\n> }\n>\n> if (tree->leaf_ctx)\n> {\n> MemoryContextDelete(tree->leaf_ctx);\n> }\n>\n> ...is there a reason we can't just delete tree->ctx, and let that\n> recursively delete child contexts?\n\nI thought that considering the RT_CREATE doesn't create its own memory\ncontext but just uses the passed context, it might be a bit unusable\nto delete the passed context in the radix tree code. For example, if a\ncaller creates a radix tree (or tidstore) on a memory context and\nwants to recreate it again and again, he also needs to re-create the\nmemory context together. It might be okay if we leave comments on\nRT_CREATE as a side effect, though. This is the same reason why we\ndon't destroy tree->dsa in RT_FREE(). And, as for RT_FREE_RECURSE(),\n\nOn Fri, Mar 1, 2024 at 1:15 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> I'm looking at RT_FREE_RECURSE again (only used for DSA memory), and\n> I'm not convinced it's freeing all the memory. 
It's been many months\n> since we discussed this last, but IIRC we cannot just tell DSA to free\n> all its segments, right?\n\nRight.\n\n> Is there currently anything preventing us\n> from destroying the whole DSA area at once?\n\nWhen it comes to tidstore and parallel vacuum, we initialize DSA and\ncreate a tidstore there at the beginning of the lazy vacuum, and\nrecreate the tidstore again after the heap vacuum. So I don't want to\ndestroy the whole DSA when destroying the tidstore. Otherwise, we will\nneed to create a new DSA and pass its handle somehow.\n\nProbably the bitmap scan case is similar. Given that bitmap scan\n(re)creates tidbitmap in the same DSA multiple times, it's better to\navoid freeing the whole DSA.\n\n>\n> + /* The last level node has pointers to values */\n> + if (shift == 0)\n> + {\n> + dsa_free(tree->dsa, ptr);\n> + return;\n> + }\n>\n> IIUC, this doesn't actually free leaves, it only frees the last-level\n> node. And, this function is unaware of whether children could be\n> embedded values. I'm thinking we need to get rid of the above\n> pre-check and instead, each node kind to have something like (e.g.\n> node4):\n>\n> RT_PTR_ALLOC child = n4->children[i];\n>\n> if (shift > 0)\n> RT_FREE_RECURSE(tree, child, shift - RT_SPAN);\n> else if (!RT_CHILDPTR_IS_VALUE(child))\n> dsa_free(tree->dsa, child);\n>\n> ...or am I missing something?\n\nYou're not missing anything. RT_FREE_RECURSE() has not been updated\nfor a long time. 
If we still need to use RT_FREE_RECURSE(), it should\nbe updated.\n\n> Thirdly, cosmetic: With the introduction of single-value leaves, it\n> seems we should do s/RT_NODE_PTR/RT_CHILD_PTR/ -- what do you think?\n\nAgreed.\n\nOn Fri, Mar 1, 2024 at 3:58 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> I wrote:\n>\n> > Secondly, I thought about my recent work to skip checking if we first\n> > need to create a root node, and that has a harmless (for vacuum at\n> > least) but slightly untidy behavior: When RT_SET is first called, and\n> > the key is bigger than 255, new nodes will go on top of the root node.\n> > These have chunk '0'. If all subsequent keys are big enough, the\n> > orginal root node will stay empty. If all keys are deleted, there will\n> > be a chain of empty nodes remaining. Again, I believe this is\n> > harmless, but to make tidy, it should easy to teach RT_EXTEND_UP to\n> > call out to RT_EXTEND_DOWN if it finds the tree is empty. I can work\n> > on this, but likely not today.\n>\n> This turns out to be a lot trickier than it looked, so it seems best\n> to allow a trivial amount of waste, as long as it's documented\n> somewhere. It also wouldn't be terrible to re-add those branches,\n> since they're highly predictable.\n>\n> I just noticed there are a lot of unused function parameters\n> (referring to parent slots) leftover from a few weeks ago. Those are\n> removed in v64-0009. 0010 makes the obvious name change in those\n> remaining to \"parent_slot\". 0011 is a simplification in two places\n> regarding reserving slots. This should be a bit easier to read and\n> possibly makes it easier on the compiler.\n\nThank you for the updates. I've briefly looked at these changes and\nthey look good to me. 
I'm going to review them again in depth.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 1 Mar 2024 17:00:21 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 1, 2024 at 3:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Feb 29, 2024 at 8:43 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > + ts->rt_context = AllocSetContextCreate(CurrentMemoryContext,\n> > + \"tidstore storage\",\n> >\n> > \"tidstore storage\" sounds a bit strange -- maybe look at some other\n> > context names for ideas.\n>\n> Agreed. How about \"tidstore's radix tree\"?\n\nThat might be okay. I'm now thinking \"TID storage\". On that note, one\nimprovement needed when we polish tidstore.c is to make sure it's\nspelled \"TID\" in comments, like other files do already.\n\n> > - leaf.alloc = (RT_PTR_ALLOC) MemoryContextAlloc(tree->leaf_ctx, allocsize);\n> > + leaf.alloc = (RT_PTR_ALLOC) MemoryContextAlloc(tree->leaf_ctx != NULL\n> > + ? tree->leaf_ctx\n> > + : tree->context, allocsize);\n> >\n> > Instead of branching here, can we copy \"context\" to \"leaf_ctx\" when\n> > necessary (those names should look more like eachother, btw)? I think\n> > that means anything not covered by this case:\n> >\n> > +#ifndef RT_VARLEN_VALUE_SIZE\n> > + if (sizeof(RT_VALUE_TYPE) > sizeof(RT_PTR_ALLOC))\n> > + tree->leaf_ctx = SlabContextCreate(ctx,\n> > + RT_STR(RT_PREFIX) \"radix_tree leaf contex\",\n> > + RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n> > + sizeof(RT_VALUE_TYPE));\n> > +#endif /* !RT_VARLEN_VALUE_SIZE */\n> >\n> > ...also, we should document why we're using slab here. On that, I\n> > don't recall why we are? We've never had a fixed-length type test case\n> > on 64-bit, so it wasn't because it won through benchmarking. It seems\n> > a hold-over from the days of \"multi-value leaves\". 
Is it to avoid the\n> > possibility of space wastage with non-power-of-two size types?\n>\n> Yes, it matches my understanding.\n\nThere are two issues quoted here, so not sure if you mean both or only\nthe last one...\n\nFor the latter, I'm not sure it makes sense to have code and #ifdef's\nto force slab for large-enough fixed-length values just because we\ncan. There may never be such a use-case anyway. I'm also not against\nit, either, but it seems like a premature optimization.\n\n> > For this stanza that remains unchanged:\n> >\n> > for (int i = 0; i < RT_SIZE_CLASS_COUNT; i++)\n> > {\n> > MemoryContextDelete(tree->node_slabs[i]);\n> > }\n> >\n> > if (tree->leaf_ctx)\n> > {\n> > MemoryContextDelete(tree->leaf_ctx);\n> > }\n> >\n> > ...is there a reason we can't just delete tree->ctx, and let that\n> > recursively delete child contexts?\n>\n> I thought that considering the RT_CREATE doesn't create its own memory\n> context but just uses the passed context, it might be a bit unusable\n> to delete the passed context in the radix tree code. For example, if a\n> caller creates a radix tree (or tidstore) on a memory context and\n> wants to recreate it again and again, he also needs to re-create the\n> memory context together. It might be okay if we leave comments on\n> RT_CREATE as a side effect, though. This is the same reason why we\n> don't destroy tree->dsa in RT_FREE(). And, as for RT_FREE_RECURSE(),\n\nRight, I should have said \"reset\". Resetting a context will delete\nit's children as well, and seems like it should work to reset the tree\ncontext, and we don't have to know whether that context actually\ncontains leaves at all. 
That should allow copying \"tree context\" to\n\"leaf context\" in the case where we have no special context for\nleaves.\n\n\n", "msg_date": "Sun, 3 Mar 2024 12:43:05 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sun, Mar 3, 2024 at 2:43 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Mar 1, 2024 at 3:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Feb 29, 2024 at 8:43 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > + ts->rt_context = AllocSetContextCreate(CurrentMemoryContext,\n> > > + \"tidstore storage\",\n> > >\n> > > \"tidstore storage\" sounds a bit strange -- maybe look at some other\n> > > context names for ideas.\n> >\n> > Agreed. How about \"tidstore's radix tree\"?\n>\n> That might be okay. I'm now thinking \"TID storage\". On that note, one\n> improvement needed when we polish tidstore.c is to make sure it's\n> spelled \"TID\" in comments, like other files do already.\n>\n> > > - leaf.alloc = (RT_PTR_ALLOC) MemoryContextAlloc(tree->leaf_ctx, allocsize);\n> > > + leaf.alloc = (RT_PTR_ALLOC) MemoryContextAlloc(tree->leaf_ctx != NULL\n> > > + ? tree->leaf_ctx\n> > > + : tree->context, allocsize);\n> > >\n> > > Instead of branching here, can we copy \"context\" to \"leaf_ctx\" when\n> > > necessary (those names should look more like eachother, btw)? I think\n> > > that means anything not covered by this case:\n> > >\n> > > +#ifndef RT_VARLEN_VALUE_SIZE\n> > > + if (sizeof(RT_VALUE_TYPE) > sizeof(RT_PTR_ALLOC))\n> > > + tree->leaf_ctx = SlabContextCreate(ctx,\n> > > + RT_STR(RT_PREFIX) \"radix_tree leaf contex\",\n> > > + RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),\n> > > + sizeof(RT_VALUE_TYPE));\n> > > +#endif /* !RT_VARLEN_VALUE_SIZE */\n> > >\n> > > ...also, we should document why we're using slab here. On that, I\n> > > don't recall why we are? 
We've never had a fixed-length type test case\n> > > on 64-bit, so it wasn't because it won through benchmarking. It seems\n> > > a hold-over from the days of \"multi-value leaves\". Is it to avoid the\n> > > possibility of space wastage with non-power-of-two size types?\n> >\n> > Yes, it matches my understanding.\n>\n> There are two issues quoted here, so not sure if you mean both or only\n> the last one...\n\nI meant only the last one.\n\n>\n> For the latter, I'm not sure it makes sense to have code and #ifdef's\n> to force slab for large-enough fixed-length values just because we\n> can. There may never be such a use-case anyway. I'm also not against\n> it, either, but it seems like a premature optimization.\n\nReading the old threads, the fact that using a slab context for leaves\noriginally came from Andres's prototype patch, was to avoid rounding\nup the bytes to a power of 2 number by aset.c. It makes sense to me to\nuse a slab context for this case. To measure the effect of using a\nslab, I've updated bench_radix_tree so it uses a large fixed-length\nvalue. The struct I used is:\n\ntypedef struct mytype\n{\n uint64 a;\n uint64 b;\n uint64 c;\n uint64 d;\n char e[100];\n} mytype;\n\nThe struct size is 136 bytes with padding, just above a power-of-2.\nThe simple benchmark test showed using a slab context for leaves is\nmore space efficient. 
The results are:\n\nslab:\n= #select * from bench_load_random_int(1000000);\n mem_allocated | load_ms\n---------------+---------\n 405643264 | 560\n(1 row)\n\naset:\n=# select * from bench_load_random_int(1000000);\n mem_allocated | load_ms\n---------------+---------\n 527777792 | 576\n(1 row)\n\n>\n> > > For this stanza that remains unchanged:\n> > >\n> > > for (int i = 0; i < RT_SIZE_CLASS_COUNT; i++)\n> > > {\n> > > MemoryContextDelete(tree->node_slabs[i]);\n> > > }\n> > >\n> > > if (tree->leaf_ctx)\n> > > {\n> > > MemoryContextDelete(tree->leaf_ctx);\n> > > }\n> > >\n> > > ...is there a reason we can't just delete tree->ctx, and let that\n> > > recursively delete child contexts?\n> >\n> > I thought that considering the RT_CREATE doesn't create its own memory\n> > context but just uses the passed context, it might be a bit unusable\n> > to delete the passed context in the radix tree code. For example, if a\n> > caller creates a radix tree (or tidstore) on a memory context and\n> > wants to recreate it again and again, he also needs to re-create the\n> > memory context together. It might be okay if we leave comments on\n> > RT_CREATE as a side effect, though. This is the same reason why we\n> > don't destroy tree->dsa in RT_FREE(). And, as for RT_FREE_RECURSE(),\n>\n> Right, I should have said \"reset\". Resetting a context will delete\n> it's children as well, and seems like it should work to reset the tree\n> context, and we don't have to know whether that context actually\n> contains leaves at all. That should allow copying \"tree context\" to\n> \"leaf context\" in the case where we have no special context for\n> leaves.\n\nResetting the tree->context seems to work. But I think we should note\nfor callers that the dsa_area passed to RT_CREATE should be created in\na different context than the context passed to RT_CREATE because\notherwise RT_FREE() will also free the dsa_area. 
For example, the\nfollowing code in test_radixtree.c will no longer work:\n\ndsa = dsa_create(tranche_id);\nradixtree = rt_create(CurrentMemoryContext, dsa, tranche_id);\n:\nrt_free(radixtree);\ndsa_detach(dsa); // dsa is already freed.\n\nSo I think that a practical usage of the radix tree will be that the\ncaller creates a memory context for a radix tree and passes it to\nRT_CREATE().\n\nI've attached an update patch set:\n\n- 0008 updates RT_FREE_RECURSE().\n- 0009 patch is an updated version of cleanup radix tree memory handling.\n- 0010 updates comments in tidstore.c such as replacing \"Tid\" with \"TID\".\n- 0011 rename TidStore to TIDSTORE all places.\n- 0012 update bench_radix_tree so it uses a (possibly large) struct\ninstead of uint64.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 4 Mar 2024 15:05:12 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 4, 2024 at 1:05 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Sun, Mar 3, 2024 at 2:43 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > Right, I should have said \"reset\". Resetting a context will delete\n> > it's children as well, and seems like it should work to reset the tree\n> > context, and we don't have to know whether that context actually\n> > contains leaves at all. That should allow copying \"tree context\" to\n> > \"leaf context\" in the case where we have no special context for\n> > leaves.\n>\n> Resetting the tree->context seems to work. But I think we should note\n> for callers that the dsa_area passed to RT_CREATE should be created in\n> a different context than the context passed to RT_CREATE because\n> otherwise RT_FREE() will also free the dsa_area. 
For example, the\n> following code in test_radixtree.c will no longer work:\n>\n> dsa = dsa_create(tranche_id);\n> radixtree = rt_create(CurrentMemoryContext, dsa, tranche_id);\n> :\n> rt_free(radixtree);\n> dsa_detach(dsa); // dsa is already freed.\n>\n> So I think that a practical usage of the radix tree will be that the\n> caller creates a memory context for a radix tree and passes it to\n> RT_CREATE().\n\nThat sounds workable to me.\n\n> I've attached an update patch set:\n>\n> - 0008 updates RT_FREE_RECURSE().\n\nThanks!\n\n> - 0009 patch is an updated version of cleanup radix tree memory handling.\n\nLooks pretty good, as does the rest. I'm going through again,\nsquashing and making tiny adjustments to the template. The only thing\nnot done is changing the test with many values to resemble the perf\ntest more.\n\nI wrote:\n> > Secondly, I thought about my recent work to skip checking if we first\n> > need to create a root node, and that has a harmless (for vacuum at\n> > least) but slightly untidy behavior: When RT_SET is first called, and\n> > the key is bigger than 255, new nodes will go on top of the root node.\n> > These have chunk '0'. If all subsequent keys are big enough, the\n> > orginal root node will stay empty. If all keys are deleted, there will\n> > be a chain of empty nodes remaining. Again, I believe this is\n> > harmless, but to make tidy, it should easy to teach RT_EXTEND_UP to\n> > call out to RT_EXTEND_DOWN if it finds the tree is empty. I can work\n> > on this, but likely not today.\n>\n> This turns out to be a lot trickier than it looked, so it seems best\n> to allow a trivial amount of waste, as long as it's documented\n> somewhere. It also wouldn't be terrible to re-add those branches,\n> since they're highly predictable.\n\nI put a little more work into this, and got it working, just needs a\nsmall amount of finicky coding. 
I'll share tomorrow.\n\nI have a question about RT_FREE_RECURSE:\n\n+ check_stack_depth();\n+ CHECK_FOR_INTERRUPTS();\n\nI'm not sure why these are here: The first seems overly paranoid,\nalthough harmless, but the second is probably a bad idea. Why should\nthe user be able to to interrupt the freeing of memory?\n\nAlso, I'm not quite happy that RT_ITER has a copy of a pointer to the\ntree, leading to coding like \"iter->tree->ctl->root\". I *think* it\nwould be easier to read if the tree was a parameter to these iteration\nfunctions. That would require an API change, so the tests/tidstore\nwould have some churn. I can do that, but before trying I wanted to\nsee what you think -- is there some reason to keep the current way?\n\n\n", "msg_date": "Mon, 4 Mar 2024 18:48:21 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 4, 2024 at 8:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Mar 4, 2024 at 1:05 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Sun, Mar 3, 2024 at 2:43 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > Right, I should have said \"reset\". Resetting a context will delete\n> > > it's children as well, and seems like it should work to reset the tree\n> > > context, and we don't have to know whether that context actually\n> > > contains leaves at all. That should allow copying \"tree context\" to\n> > > \"leaf context\" in the case where we have no special context for\n> > > leaves.\n> >\n> > Resetting the tree->context seems to work. But I think we should note\n> > for callers that the dsa_area passed to RT_CREATE should be created in\n> > a different context than the context passed to RT_CREATE because\n> > otherwise RT_FREE() will also free the dsa_area. 
For example, the\n> > following code in test_radixtree.c will no longer work:\n> >\n> > dsa = dsa_create(tranche_id);\n> > radixtree = rt_create(CurrentMemoryContext, dsa, tranche_id);\n> > :\n> > rt_free(radixtree);\n> > dsa_detach(dsa); // dsa is already freed.\n> >\n> > So I think that a practical usage of the radix tree will be that the\n> > caller creates a memory context for a radix tree and passes it to\n> > RT_CREATE().\n>\n> That sounds workable to me.\n>\n> > I've attached an update patch set:\n> >\n> > - 0008 updates RT_FREE_RECURSE().\n>\n> Thanks!\n>\n> > - 0009 patch is an updated version of cleanup radix tree memory handling.\n>\n> Looks pretty good, as does the rest. I'm going through again,\n> squashing and making tiny adjustments to the template. The only thing\n> not done is changing the test with many values to resemble the perf\n> test more.\n>\n> I wrote:\n> > > Secondly, I thought about my recent work to skip checking if we first\n> > > need to create a root node, and that has a harmless (for vacuum at\n> > > least) but slightly untidy behavior: When RT_SET is first called, and\n> > > the key is bigger than 255, new nodes will go on top of the root node.\n> > > These have chunk '0'. If all subsequent keys are big enough, the\n> > > orginal root node will stay empty. If all keys are deleted, there will\n> > > be a chain of empty nodes remaining. Again, I believe this is\n> > > harmless, but to make tidy, it should easy to teach RT_EXTEND_UP to\n> > > call out to RT_EXTEND_DOWN if it finds the tree is empty. I can work\n> > > on this, but likely not today.\n> >\n> > This turns out to be a lot trickier than it looked, so it seems best\n> > to allow a trivial amount of waste, as long as it's documented\n> > somewhere. It also wouldn't be terrible to re-add those branches,\n> > since they're highly predictable.\n>\n> I put a little more work into this, and got it working, just needs a\n> small amount of finicky coding. 
I'll share tomorrow.\n>\n> I have a question about RT_FREE_RECURSE:\n>\n> + check_stack_depth();\n> + CHECK_FOR_INTERRUPTS();\n>\n> I'm not sure why these are here: The first seems overly paranoid,\n> although harmless, but the second is probably a bad idea. Why should\n> the user be able to to interrupt the freeing of memory?\n\nGood catch. We should not check the interruption there.\n\n> Also, I'm not quite happy that RT_ITER has a copy of a pointer to the\n> tree, leading to coding like \"iter->tree->ctl->root\". I *think* it\n> would be easier to read if the tree was a parameter to these iteration\n> functions. That would require an API change, so the tests/tidstore\n> would have some churn. I can do that, but before trying I wanted to\n> see what you think -- is there some reason to keep the current way?\n\nI considered both usages, there are two reasons for the current style.\nI'm concerned that if we pass both the tree and RT_ITER to iteration\nfunctions, the caller could mistakenly pass a different tree than the\none that was specified to create the RT_ITER. And the second reason is\njust to make it consistent with other data structures such as\ndynahash.c and dshash.c, but I now realized that in simplehash.h we\npass both the hash table and the iterator.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Tue, 5 Mar 2024 10:27:21 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 5, 2024 at 8:27 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Mar 4, 2024 at 8:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Mon, Mar 4, 2024 at 1:05 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> > > Resetting the tree->context seems to work. 
But I think we should note\n> > > for callers that the dsa_area passed to RT_CREATE should be created in\n> > > a different context than the context passed to RT_CREATE because\n> > > otherwise RT_FREE() will also free the dsa_area. For example, the\n> > > following code in test_radixtree.c will no longer work:\n\nI've added a comment in v66-0004, which contains a number of other\nsmall corrections and edits.\n\nOn Fri, Mar 1, 2024 at 3:01 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > Thirdly, cosmetic: With the introduction of single-value leaves, it\n> > seems we should do s/RT_NODE_PTR/RT_CHILD_PTR/ -- what do you think?\n>\n> Agreed.\n\nDone in v66-0005.\n\nv66-0006 removes outdated tests for invalid root that somehow got left over.\n\n> > I wrote:\n> > > > Secondly, I thought about my recent work to skip checking if we first\n> > > > need to create a root node, and that has a harmless (for vacuum at\n> > > > least) but slightly untidy behavior: When RT_SET is first called, and\n> > > > the key is bigger than 255, new nodes will go on top of the root node.\n> > > > These have chunk '0'. If all subsequent keys are big enough, the\n> > > > orginal root node will stay empty. If all keys are deleted, there will\n> > > > be a chain of empty nodes remaining. Again, I believe this is\n> > > > harmless, but to make tidy, it should easy to teach RT_EXTEND_UP to\n> > > > call out to RT_EXTEND_DOWN if it finds the tree is empty. I can work\n> > > > on this, but likely not today.\n\n> > I put a little more work into this, and got it working, just needs a\n> > small amount of finicky coding. I'll share tomorrow.\n\nDone in v66-0007. I'm a bit disappointed in the extra messiness this\nadds, although it's not a lot.\n\n> > + check_stack_depth();\n> > + CHECK_FOR_INTERRUPTS();\n> >\n> > I'm not sure why these are here: The first seems overly paranoid,\n> > although harmless, but the second is probably a bad idea. 
Why should\n> > the user be able to to interrupt the freeing of memory?\n>\n> Good catch. We should not check the interruption there.\n\nRemoved in v66-0008.\n\n> > Also, I'm not quite happy that RT_ITER has a copy of a pointer to the\n> > tree, leading to coding like \"iter->tree->ctl->root\". I *think* it\n> > would be easier to read if the tree was a parameter to these iteration\n> > functions. That would require an API change, so the tests/tidstore\n> > would have some churn. I can do that, but before trying I wanted to\n> > see what you think -- is there some reason to keep the current way?\n>\n> I considered both usages, there are two reasons for the current style.\n> I'm concerned that if we pass both the tree and RT_ITER to iteration\n> functions, the caller could mistakenly pass a different tree than the\n> one that was specified to create the RT_ITER. And the second reason is\n> just to make it consistent with other data structures such as\n> dynahash.c and dshash.c, but I now realized that in simplehash.h we\n> pass both the hash table and the iterator.\n\nOkay, then I don't think it's worth messing with at this point.\n\nOn Tue, Feb 6, 2024 at 9:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Feb 2, 2024 at 8:47 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > It's pretty hard to see what test_pattern() is doing, or why it's\n> > useful. I wonder if instead the test could use something like the\n> > benchmark where random integers are masked off. That seems simpler. I\n> > can work on that, but I'd like to hear your side about test_pattern().\n>\n> Yeah, test_pattern() is originally created for the integerset so it\n> doesn't necessarily fit the radixtree. I agree to use some tests from\n> benchmarks.\n\nDone in v66-0009. I'd be curious to hear any feedback. 
I like the\naspect that the random numbers come from a different seed every time\nthe test runs.\n\nv66-0010/0011 run pgindent, the latter with one typedef added for the\ntest module. 0012 - 0017 are copied from v65, and I haven't done any\nwork on tidstore or vacuum, except for squashing most v65 follow-up\npatches.\n\nI'd like to push 0001 and 0002 shortly, and then do another sweep over\n0003, with remaining feedback, and get that in so we get some\nbuildfarm testing before the remaining polishing work on\ntidstore/vacuum.", "msg_date": "Tue, 5 Mar 2024 16:41:30 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 5, 2024 at 6:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Feb 6, 2024 at 9:58 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Feb 2, 2024 at 8:47 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > It's pretty hard to see what test_pattern() is doing, or why it's\n> > > useful. I wonder if instead the test could use something like the\n> > > benchmark where random integers are masked off. That seems simpler. I\n> > > can work on that, but I'd like to hear your side about test_pattern().\n> >\n> > Yeah, test_pattern() is originally created for the integerset so it\n> > doesn't necessarily fit the radixtree. I agree to use some tests from\n> > benchmarks.\n>\n> Done in v66-0009. I'd be curious to hear any feedback. I like the\n> aspect that the random numbers come from a different seed every time\n> the test runs.\n\nThe new tests look good. 
Here are some comments:\n\n---\n+ expected = keys[i];\n+ iterval = rt_iterate_next(iter, &iterkey);\n\n- ndeleted++;\n+ EXPECT_TRUE(iterval != NULL);\n+ EXPECT_EQ_U64(iterkey, expected);\n+ EXPECT_EQ_U64(*iterval, expected);\n\nCan we verify that the iteration returns keys in ascending order?\n\n---\n+ /* reset random number generator for deletion */\n+ pg_prng_seed(&state, seed);\n\nWhy is resetting the seed required here?\n\n---\nThe radix tree (and dsa in TSET_SHARED_RT case) should be freed at the end.\n\n---\n radixtree_ctx = AllocSetContextCreate(CurrentMemoryContext,\n \"test_radix_tree\",\n ALLOCSET_DEFAULT_SIZES);\n\nWe use a mix of ALLOCSET_DEFAULT_SIZES and ALLOCSET_SMALL_SIZES. I\nthink it's better to use either one for consistency.\n\n> I'd like to push 0001 and 0002 shortly, and then do another sweep over\n> 0003, with remaining feedback, and get that in so we get some\n> buildfarm testing before the remaining polishing work on\n> tidstore/vacuum.\n\nSounds a reasonable plan. 0001 and 0002 look good to me. I'm going to\npolish tidstore and vacuum patches and update commit messages.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 6 Mar 2024 01:11:43 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 5, 2024 at 11:12 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Tue, Mar 5, 2024 at 6:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > Done in v66-0009. I'd be curious to hear any feedback. I like the\n> > aspect that the random numbers come from a different seed every time\n> > the test runs.\n>\n> The new tests look good. 
Here are some comments:\n>\n> ---\n> + expected = keys[i];\n> + iterval = rt_iterate_next(iter, &iterkey);\n>\n> - ndeleted++;\n> + EXPECT_TRUE(iterval != NULL);\n> + EXPECT_EQ_U64(iterkey, expected);\n> + EXPECT_EQ_U64(*iterval, expected);\n>\n> Can we verify that the iteration returns keys in ascending order?\n\nWe get the \"expected\" value from the keys we saved in the now-sorted\narray, so we do already. Unless I misunderstand you.\n\n> ---\n> + /* reset random number generator for deletion */\n> + pg_prng_seed(&state, seed);\n>\n> Why is resetting the seed required here?\n\nGood catch - My intention was to delete in the same random order we\ninserted with. We still have the keys in the array, but they're sorted\nby now. I forgot to go the extra step and use the prng when generating\nthe keys for deletion -- will fix.\n\n> ---\n> The radix tree (and dsa in TSET_SHARED_RT case) should be freed at the end.\n\nWill fix.\n\n> ---\n> radixtree_ctx = AllocSetContextCreate(CurrentMemoryContext,\n> \"test_radix_tree\",\n> ALLOCSET_DEFAULT_SIZES);\n>\n> We use a mix of ALLOCSET_DEFAULT_SIZES and ALLOCSET_SMALL_SIZES. I\n> think it's better to use either one for consistency.\n\nWill change to \"small\", since 32-bit platforms will use slab for leaves.\n\nI'll look at the memory usage and estimate what 32-bit platforms will\nuse, and maybe adjust the number of keys. A few megabytes is fine, but\nnot many megabytes.\n\n> > I'd like to push 0001 and 0002 shortly, and then do another sweep over\n> > 0003, with remaining feedback, and get that in so we get some\n> > buildfarm testing before the remaining polishing work on\n> > tidstore/vacuum.\n>\n> Sounds a reasonable plan. 0001 and 0002 look good to me. 
I'm going to\n> polish tidstore and vacuum patches and update commit messages.\n\nSounds good.\n\n\n", "msg_date": "Wed, 6 Mar 2024 10:59:34 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 12:59 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Mar 5, 2024 at 11:12 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Tue, Mar 5, 2024 at 6:41 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > Done in v66-0009. I'd be curious to hear any feedback. I like the\n> > > aspect that the random numbers come from a different seed every time\n> > > the test runs.\n> >\n> > The new tests look good. Here are some comments:\n> >\n> > ---\n> > + expected = keys[i];\n> > + iterval = rt_iterate_next(iter, &iterkey);\n> >\n> > - ndeleted++;\n> > + EXPECT_TRUE(iterval != NULL);\n> > + EXPECT_EQ_U64(iterkey, expected);\n> > + EXPECT_EQ_U64(*iterval, expected);\n> >\n> > Can we verify that the iteration returns keys in ascending order?\n>\n> We get the \"expected\" value from the keys we saved in the now-sorted\n> array, so we do already. Unless I misunderstand you.\n\nAh, you're right. Please ignore this comment.\n\n>\n> > ---\n> > radixtree_ctx = AllocSetContextCreate(CurrentMemoryContext,\n> > \"test_radix_tree\",\n> > ALLOCSET_DEFAULT_SIZES);\n> >\n> > We use a mix of ALLOCSET_DEFAULT_SIZES and ALLOCSET_SMALL_SIZES. I\n> > think it's better to use either one for consistency.\n>\n> Will change to \"small\", since 32-bit platforms will use slab for leaves.\n\nAgreed.\n\n>\n> I'll look at the memory usage and estimate what 32-bit platforms will\n> use, and maybe adjust the number of keys. 
A few megabytes is fine, but\n> not many megabytes.\n\nThanks, sounds good.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 6 Mar 2024 15:45:22 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2024-03-05 16:41:30 +0700, John Naylor wrote:\n> I'd like to push 0001 and 0002 shortly, and then do another sweep over\n> 0003, with remaining feedback, and get that in so we get some\n> buildfarm testing before the remaining polishing work on\n> tidstore/vacuum.\n\nA few ARM buildfarm animals are complaining:\n\nhttps://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=parula&dt=2024-03-06%2007%3A34%3A02\nhttps://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=snakefly&dt=2024-03-06%2007%3A34%3A03\nhttps://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=massasauga&dt=2024-03-06%2007%3A33%3A18\n\nGreetings,\n\nAndres Freund\n\n\n", "msg_date": "Tue, 5 Mar 2024 23:41:06 -0800", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 4:41 PM Andres Freund <andres@anarazel.de> wrote:\n>\n> Hi,\n>\n> On 2024-03-05 16:41:30 +0700, John Naylor wrote:\n> > I'd like to push 0001 and 0002 shortly, and then do another sweep over\n> > 0003, with remaining feedback, and get that in so we get some\n> > buildfarm testing before the remaining polishing work on\n> > tidstore/vacuum.\n>\n> A few ARM buildfarm animals are complaining:\n>\n> https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=parula&dt=2024-03-06%2007%3A34%3A02\n> https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=snakefly&dt=2024-03-06%2007%3A34%3A03\n> https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=massasauga&dt=2024-03-06%2007%3A33%3A18\n>\n\nThe error message we got 
is:\n\n../../src/include/port/simd.h:326:71: error: incompatible type for\nargument 1 of \\342\\200\\230vshrq_n_s8\\342\\200\\231\n uint8x16_t masked = vandq_u8(vld1q_u8(mask), (uint8x16_t) vshrq_n_s8(v, 7));\n ^\n\nSince 'v' is uint8x16_t I think we should have used vshrq_n_u8() instead.\n\nRegard,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 6 Mar 2024 17:02:17 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 3:02 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Wed, Mar 6, 2024 at 4:41 PM Andres Freund <andres@anarazel.de> wrote:\n\n> > A few ARM buildfarm animals are complaining:\n> >\n> > https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=parula&dt=2024-03-06%2007%3A34%3A02\n> > https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=snakefly&dt=2024-03-06%2007%3A34%3A03\n> > https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=massasauga&dt=2024-03-06%2007%3A33%3A18\n> >\n>\n> The error message we got is:\n>\n> ../../src/include/port/simd.h:326:71: error: incompatible type for\n> argument 1 of \\342\\200\\230vshrq_n_s8\\342\\200\\231\n> uint8x16_t masked = vandq_u8(vld1q_u8(mask), (uint8x16_t) vshrq_n_s8(v, 7));\n> ^\n>\n> Since 'v' is uint8x16_t I think we should have used vshrq_n_u8() instead.\n\nThat sounds plausible, and I'll look further.\n\n(Hmm, I thought we had run this code on Arm already...)\n\n\n", "msg_date": "Wed, 6 Mar 2024 15:06:50 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi, \n\nOn March 6, 2024 9:06:50 AM GMT+01:00, John Naylor <johncnaylorls@gmail.com> wrote:\n>On Wed, Mar 6, 2024 at 3:02 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>>\n>> On Wed, Mar 6, 2024 at 4:41 PM Andres Freund 
<andres@anarazel.de> wrote:\n>\n>> > A few ARM buildfarm animals are complaining:\n>> >\n>> > https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=parula&dt=2024-03-06%2007%3A34%3A02\n>> > https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=snakefly&dt=2024-03-06%2007%3A34%3A03\n>> > https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=massasauga&dt=2024-03-06%2007%3A33%3A18\n>> >\n>>\n>> The error message we got is:\n>>\n>> ../../src/include/port/simd.h:326:71: error: incompatible type for\n>> argument 1 of \\342\\200\\230vshrq_n_s8\\342\\200\\231\n>> uint8x16_t masked = vandq_u8(vld1q_u8(mask), (uint8x16_t) vshrq_n_s8(v, 7));\n>> ^\n>>\n>> Since 'v' is uint8x16_t I think we should have used vshrq_n_u8() instead.\n>\n>That sounds plausible, and I'll look further.\n>\n>(Hmm, I thought we had run this code on Arm already...)\n\nPerhaps we should switch one of the CI jobs to ARM...\n\nAndres \n\n-- \nSent from my Android device with K-9 Mail. Please excuse my brevity.\n\n\n", "msg_date": "Wed, 06 Mar 2024 09:13:14 +0100", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 3:06 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> (Hmm, I thought we had run this code on Arm already...)\n\nCI MacOS uses Clang on aarch64, which has been working fine. 
The\nfailing animals are on gcc 7.3...\n\n\n", "msg_date": "Wed, 6 Mar 2024 15:22:50 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 3:02 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> ../../src/include/port/simd.h:326:71: error: incompatible type for\n> argument 1 of \\342\\200\\230vshrq_n_s8\\342\\200\\231\n> uint8x16_t masked = vandq_u8(vld1q_u8(mask), (uint8x16_t) vshrq_n_s8(v, 7));\n> ^\n>\n> Since 'v' is uint8x16_t I think we should have used vshrq_n_u8() instead.\n\nI've looked around and it seems clang is more lax on conversions.\nSince it works fine for clang, I think we just need a cast here for\ngcc. I've attached a blind attempt at a fix -- I'll apply shortly\nunless someone happens to test and find it doesn't work.", "msg_date": "Wed, 6 Mar 2024 15:33:44 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 5:33 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Mar 6, 2024 at 3:02 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > ../../src/include/port/simd.h:326:71: error: incompatible type for\n> > argument 1 of \\342\\200\\230vshrq_n_s8\\342\\200\\231\n> > uint8x16_t masked = vandq_u8(vld1q_u8(mask), (uint8x16_t) vshrq_n_s8(v, 7));\n> > ^\n> >\n> > Since 'v' is uint8x16_t I think we should have used vshrq_n_u8() instead.\n>\n> I've looked around and it seems clang is more lax on conversions.\n> Since it works fine for clang, I think we just need a cast here for\n> gcc. I've attached a blind attempt at a fix -- I'll apply shortly\n> unless someone happens to test and find it doesn't work.\n\nI've reproduced the same error on my raspberry pi, and confirmed the\npatch fixes the error.\n\nMy previous idea was wrong. 
With my proposal, the regression test for\nradix tree failed on my raspberry pi. On the other hand, with your\npatch the tests passed.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 6 Mar 2024 17:40:20 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 3:40 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Wed, Mar 6, 2024 at 5:33 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > I've looked around and it seems clang is more lax on conversions.\n> > Since it works fine for clang, I think we just need a cast here for\n> > gcc. I've attached a blind attempt at a fix -- I'll apply shortly\n> > unless someone happens to test and find it doesn't work.\n>\n> I've reproduced the same error on my raspberry pi, and confirmed the\n> patch fixes the error.\n>\n> My previous idea was wrong. With my proposal, the regression test for\n> radix tree failed on my raspberry pi. On the other hand, with your\n> patch the tests passed.\n\nPushed, and at least parula's green now, thanks for testing! And\nthanks, Andres, for the ping!\n\n\n", "msg_date": "Wed, 6 Mar 2024 16:10:35 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 5, 2024 at 11:12 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I'd like to push 0001 and 0002 shortly, and then do another sweep over\n> > 0003, with remaining feedback, and get that in so we get some\n> > buildfarm testing before the remaining polishing work on\n> > tidstore/vacuum.\n>\n> Sounds a reasonable plan. 0001 and 0002 look good to me. 
I'm going to\n> polish tidstore and vacuum patches and update commit messages.\n\nI don't think v66 got a CI run because of vacuumlazy.c bitrot, so I'm\nattaching v67 which fixes that and has some small cosmetic adjustments\nto the template. One functional change for debugging build is that\nRT_STATS now prints out the number of leaves. I'll squash and push\n0001 tomorrow morning unless there are further comments.", "msg_date": "Wed, 6 Mar 2024 18:20:35 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Actually, I forgot -- I had one more question: Masahiko, is there a\nreason for this extra local variable, which uses the base type, rather\nthan the typedef'd parameter?\n\n+RT_SCOPE RT_RADIX_TREE *\n+RT_ATTACH(dsa_area *dsa, RT_HANDLE handle)\n+{\n+ RT_RADIX_TREE *tree;\n+ dsa_pointer control;\n+\n+ tree = (RT_RADIX_TREE *) palloc0(sizeof(RT_RADIX_TREE));\n+\n+ /* Find the control object in shared memory */\n+ control = handle;\n\n\n", "msg_date": "Wed, 6 Mar 2024 18:25:21 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 8:25 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> Actually, I forgot -- I had one more question: Masahiko, is there a\n> reason for this extra local variable, which uses the base type, rather\n> than the typedef'd parameter?\n>\n> +RT_SCOPE RT_RADIX_TREE *\n> +RT_ATTACH(dsa_area *dsa, RT_HANDLE handle)\n> +{\n> + RT_RADIX_TREE *tree;\n> + dsa_pointer control;\n> +\n> + tree = (RT_RADIX_TREE *) palloc0(sizeof(RT_RADIX_TREE));\n> +\n> + /* Find the control object in shared memory */\n> + control = handle;\n\nI think it's mostly because of readability; it makes clear that the\nhandle should be castable to dsa_pointer and it's a control object. 
I\nborrowed it from dshash_attach().\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 6 Mar 2024 20:58:51 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 8:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Mar 5, 2024 at 11:12 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > I'd like to push 0001 and 0002 shortly, and then do another sweep over\n> > > 0003, with remaining feedback, and get that in so we get some\n> > > buildfarm testing before the remaining polishing work on\n> > > tidstore/vacuum.\n> >\n> > Sounds a reasonable plan. 0001 and 0002 look good to me. I'm going to\n> > polish tidstore and vacuum patches and update commit messages.\n>\n> I don't think v66 got a CI run because of vacuumlazy.c bitrot, so I'm\n> attaching v67 which fixes that and has some small cosmetic adjustments\n> to the template.\n\nThank you for updating the patch.\n\n> One functional change for debugging build is that\n> RT_STATS now prints out the number of leaves. I'll squash and push\n> 0001 tomorrow morning unless there are further comments.\n\nThe 0001 patch looks good to me. 
I have some minor comments:\n\n--- /dev/null\n+++ b/src/test/modules/test_radixtree/Makefile\n@@ -0,0 +1,23 @@\n+# src/test/modules/test_radixtree/Makefile\n+\n+MODULE_big = test_radixtree\n+OBJS = \\\n+ $(WIN32RES) \\\n+ test_radixtree.o\n+PGFILEDESC = \"test_radixtree - test code for src/backend/lib/radixtree.c\"\n+\n\n\"src/backend/lib/radixtree.c\" should be updated to\n\"src/include/lib/radixtree.h\".\n\n---\n--- /dev/null\n+++ b/src/test/modules/test_radixtree/README\n@@ -0,0 +1,7 @@\n+test_integerset contains unit tests for testing the integer set implementation\n+in src/backend/lib/integerset.c.\n+\n+The tests verify the correctness of the implementation, but they can also be\n+used as a micro-benchmark. If you set the 'intset_test_stats' flag in\n+test_integerset.c, the tests will print extra information about execution time\n+and memory usage.\n\nThis file is not updated for test_radixtree. I think we can remove it\nas the test cases in test_radixtree are clear.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 6 Mar 2024 23:13:17 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 6, 2024 at 6:59 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > + /* Find the control object in shared memory */\n> > + control = handle;\n>\n> I think it's mostly because of readability; it makes clear that the\n> handle should be castable to dsa_pointer and it's a control object. I\n> borrowed it from dshash_attach().\n\nI find that a bit strange, but I went ahead and kept it.\n\n\n\nOn Wed, Mar 6, 2024 at 9:13 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> The 0001 patch looks good to me. 
I have some minor comments:\n\n> +PGFILEDESC = \"test_radixtree - test code for src/backend/lib/radixtree.c\"\n> +\n>\n> \"src/backend/lib/radixtree.c\" should be updated to\n> \"src/include/lib/radixtree.h\".\n\nDone.\n\n> --- /dev/null\n> +++ b/src/test/modules/test_radixtree/README\n> @@ -0,0 +1,7 @@\n> +test_integerset contains unit tests for testing the integer set implementation\n> +in src/backend/lib/integerset.c.\n> +\n> +The tests verify the correctness of the implementation, but they can also be\n> +used as a micro-benchmark. If you set the 'intset_test_stats' flag in\n> +test_integerset.c, the tests will print extra information about execution time\n> +and memory usage.\n>\n> This file is not updated for test_radixtree. I think we can remove it\n> as the test cases in test_radixtree are clear.\n\nDone. I pushed this with a few last-minute cosmetic adjustments. This\nhas been a very long time coming, but we're finally in the home\nstretch!\n\nAlready, I see sifaka doesn't like this, and I'm looking now...\n\n\n", "msg_date": "Thu, 7 Mar 2024 12:55:22 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 12:55 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Mar 6, 2024 at 6:59 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > + /* Find the control object in shared memory */\n> > > + control = handle;\n> >\n> > I think it's mostly because of readability; it makes clear that the\n> > handle should be castable to dsa_pointer and it's a control object. I\n> > borrowed it from dshash_attach().\n>\n> I find that a bit strange, but I went ahead and kept it.\n>\n>\n>\n> On Wed, Mar 6, 2024 at 9:13 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > The 0001 patch looks good to me. 
I have some minor comments:\n>\n> > +PGFILEDESC = \"test_radixtree - test code for src/backend/lib/radixtree.c\"\n> > +\n> >\n> > \"src/backend/lib/radixtree.c\" should be updated to\n> > \"src/include/lib/radixtree.h\".\n>\n> Done.\n>\n> > --- /dev/null\n> > +++ b/src/test/modules/test_radixtree/README\n> > @@ -0,0 +1,7 @@\n> > +test_integerset contains unit tests for testing the integer set implementation\n> > +in src/backend/lib/integerset.c.\n> > +\n> > +The tests verify the correctness of the implementation, but they can also be\n> > +used as a micro-benchmark. If you set the 'intset_test_stats' flag in\n> > +test_integerset.c, the tests will print extra information about execution time\n> > +and memory usage.\n> >\n> > This file is not updated for test_radixtree. I think we can remove it\n> > as the test cases in test_radixtree are clear.\n>\n> Done. I pushed this with a few last-minute cosmetic adjustments. This\n> has been a very long time coming, but we're finally in the home\n> stretch!\n>\n> Already, I see sifaka doesn't like this, and I'm looking now...\n\nIt's complaining that these forward declarations...\n\n/* generate forward declarations necessary to use the radix tree */\n#ifdef RT_DECLARE\n\ntypedef struct RT_RADIX_TREE RT_RADIX_TREE;\ntypedef struct RT_ITER RT_ITER;\n\n... 
cause \"error: redefinition of typedef 'rt_radix_tree' is a C11\nfeature [-Werror,-Wtypedef-redefinition]\"\n\nI'll look in the other templates to see if what they do.\n\n\n", "msg_date": "Thu, 7 Mar 2024 12:59:03 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 12:59 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 12:55 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Wed, Mar 6, 2024 at 6:59 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > > + /* Find the control object in shared memory */\n> > > > + control = handle;\n> > >\n> > > I think it's mostly because of readability; it makes clear that the\n> > > handle should be castable to dsa_pointer and it's a control object. I\n> > > borrowed it from dshash_attach().\n> >\n> > I find that a bit strange, but I went ahead and kept it.\n> >\n> >\n> >\n> > On Wed, Mar 6, 2024 at 9:13 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > The 0001 patch looks good to me. I have some minor comments:\n> >\n> > > +PGFILEDESC = \"test_radixtree - test code for src/backend/lib/radixtree.c\"\n> > > +\n> > >\n> > > \"src/backend/lib/radixtree.c\" should be updated to\n> > > \"src/include/lib/radixtree.h\".\n> >\n> > Done.\n> >\n> > > --- /dev/null\n> > > +++ b/src/test/modules/test_radixtree/README\n> > > @@ -0,0 +1,7 @@\n> > > +test_integerset contains unit tests for testing the integer set implementation\n> > > +in src/backend/lib/integerset.c.\n> > > +\n> > > +The tests verify the correctness of the implementation, but they can also be\n> > > +used as a micro-benchmark. If you set the 'intset_test_stats' flag in\n> > > +test_integerset.c, the tests will print extra information about execution time\n> > > +and memory usage.\n> > >\n> > > This file is not updated for test_radixtree. 
I think we can remove it\n> > > as the test cases in test_radixtree are clear.\n> >\n> > Done. I pushed this with a few last-minute cosmetic adjustments. This\n> > has been a very long time coming, but we're finally in the home\n> > stretch!\n> >\n> > Already, I see sifaka doesn't like this, and I'm looking now...\n>\n> It's complaining that these forward declarations...\n>\n> /* generate forward declarations necessary to use the radix tree */\n> #ifdef RT_DECLARE\n>\n> typedef struct RT_RADIX_TREE RT_RADIX_TREE;\n> typedef struct RT_ITER RT_ITER;\n>\n> ... cause \"error: redefinition of typedef 'rt_radix_tree' is a C11\n> feature [-Werror,-Wtypedef-redefinition]\"\n>\n> I'll look in the other templates to see if what they do.\n\nTheir \"declare\" sections have full typedefs. I found it works to leave\nout the typedef for the \"define\" section, but I first want to\nreproduce the build failure.\n\nIn addition, olingo and grassquit are showing different kinds of\n\"AddressSanitizer: odr-violation\" errors, which I'm not sure what to\nmake of -- example:\n\n==1862767==ERROR: AddressSanitizer: odr-violation (0x7fc257476b60):\n [1] size=256 'pg_leftmost_one_pos'\n/home/bf/bf-build/olingo/HEAD/pgsql.build/../pgsql/src/port/pg_bitutils.c:34\n [2] size=256 'pg_leftmost_one_pos'\n/home/bf/bf-build/olingo/HEAD/pgsql.build/../pgsql/src/port/pg_bitutils.c:34\nThese globals were registered at these points:\n [1]:\n #0 0x563564b97bf6 in __asan_register_globals\n(/home/bf/bf-build/olingo/HEAD/pgsql.build/tmp_install/home/bf/bf-build/olingo/HEAD/inst/bin/postgres+0x3e2bf6)\n(BuildId: e2ff70bf14f342e03f451bba119134a49a50b8b8)\n #1 0x563564b98d1d in __asan_register_elf_globals\n(/home/bf/bf-build/olingo/HEAD/pgsql.build/tmp_install/home/bf/bf-build/olingo/HEAD/inst/bin/postgres+0x3e3d1d)\n(BuildId: e2ff70bf14f342e03f451bba119134a49a50b8b8)\n #2 0x7fc265c3fe3d in call_init elf/dl-init.c:74:3\n #3 0x7fc265c3fe3d in call_init elf/dl-init.c:26:1\n\n [2]:\n #0 0x563564b97bf6 in 
__asan_register_globals\n(/home/bf/bf-build/olingo/HEAD/pgsql.build/tmp_install/home/bf/bf-build/olingo/HEAD/inst/bin/postgres+0x3e2bf6)\n(BuildId: e2ff70bf14f342e03f451bba119134a49a50b8b8)\n #1 0x563564b98d1d in __asan_register_elf_globals\n(/home/bf/bf-build/olingo/HEAD/pgsql.build/tmp_install/home/bf/bf-build/olingo/HEAD/inst/bin/postgres+0x3e3d1d)\n(BuildId: e2ff70bf14f342e03f451bba119134a49a50b8b8)\n #2 0x7fc2649847f5 in call_init csu/../csu/libc-start.c:145:3\n #3 0x7fc2649847f5 in __libc_start_main csu/../csu/libc-start.c:347:5\n\n\n", "msg_date": "Thu, 7 Mar 2024 13:19:46 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 3:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 12:59 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Thu, Mar 7, 2024 at 12:55 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Wed, Mar 6, 2024 at 6:59 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > >\n> > > > > + /* Find the control object in shared memory */\n> > > > > + control = handle;\n> > > >\n> > > > I think it's mostly because of readability; it makes clear that the\n> > > > handle should be castable to dsa_pointer and it's a control object. I\n> > > > borrowed it from dshash_attach().\n> > >\n> > > I find that a bit strange, but I went ahead and kept it.\n> > >\n> > >\n> > >\n> > > On Wed, Mar 6, 2024 at 9:13 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > > The 0001 patch looks good to me. 
I have some minor comments:\n> > >\n> > > > +PGFILEDESC = \"test_radixtree - test code for src/backend/lib/radixtree.c\"\n> > > > +\n> > > >\n> > > > \"src/backend/lib/radixtree.c\" should be updated to\n> > > > \"src/include/lib/radixtree.h\".\n> > >\n> > > Done.\n> > >\n> > > > --- /dev/null\n> > > > +++ b/src/test/modules/test_radixtree/README\n> > > > @@ -0,0 +1,7 @@\n> > > > +test_integerset contains unit tests for testing the integer set implementation\n> > > > +in src/backend/lib/integerset.c.\n> > > > +\n> > > > +The tests verify the correctness of the implementation, but they can also be\n> > > > +used as a micro-benchmark. If you set the 'intset_test_stats' flag in\n> > > > +test_integerset.c, the tests will print extra information about execution time\n> > > > +and memory usage.\n> > > >\n> > > > This file is not updated for test_radixtree. I think we can remove it\n> > > > as the test cases in test_radixtree are clear.\n> > >\n> > > Done. I pushed this with a few last-minute cosmetic adjustments. This\n> > > has been a very long time coming, but we're finally in the home\n> > > stretch!\n> > >\n> > > Already, I see sifaka doesn't like this, and I'm looking now...\n> >\n> > It's complaining that these forward declarations...\n> >\n> > /* generate forward declarations necessary to use the radix tree */\n> > #ifdef RT_DECLARE\n> >\n> > typedef struct RT_RADIX_TREE RT_RADIX_TREE;\n> > typedef struct RT_ITER RT_ITER;\n> >\n> > ... cause \"error: redefinition of typedef 'rt_radix_tree' is a C11\n> > feature [-Werror,-Wtypedef-redefinition]\"\n> >\n> > I'll look in the other templates to see if what they do.\n>\n> Their \"declare\" sections have full typedefs. I found it works to leave\n> out the typedef for the \"define\" section, but I first want to\n> reproduce the build failure.\n\nRight. I've reproduced this build failure on my machine by specifying\nflags \"-Wtypedef-redefinition -std=gnu99\" to clang. 
Something the\nbelow change seems to fix the problem:\n\n--- a/src/include/lib/radixtree.h\n+++ b/src/include/lib/radixtree.h\n@@ -676,7 +676,7 @@ typedef struct RT_RADIX_TREE_CONTROL\n } RT_RADIX_TREE_CONTROL;\n\n /* Entry point for allocating and accessing the tree */\n-typedef struct RT_RADIX_TREE\n+struct RT_RADIX_TREE\n {\n MemoryContext context;\n\n@@ -691,7 +691,7 @@ typedef struct RT_RADIX_TREE\n /* leaf_context is used only for single-value leaves */\n MemoryContextData *leaf_context;\n #endif\n-} RT_RADIX_TREE;\n+};\n\n /*\n * Iteration support.\n@@ -714,7 +714,7 @@ typedef struct RT_NODE_ITER\n } RT_NODE_ITER;\n\n /* state for iterating over the whole radix tree */\n-typedef struct RT_ITER\n+struct RT_ITER\n {\n RT_RADIX_TREE *tree;\n\n@@ -728,7 +728,7 @@ typedef struct RT_ITER\n\n /* The key constructed during iteration */\n uint64 key;\n-} RT_ITER;\n+};\n\n\n /* verification (available only in assert-enabled builds) */\n\n>\n> In addition, olingo and grassquit are showing different kinds of\n> \"AddressSanitizer: odr-violation\" errors, which I'm not sure what to\n> make of -- example:\n>\n> ==1862767==ERROR: AddressSanitizer: odr-violation (0x7fc257476b60):\n> [1] size=256 'pg_leftmost_one_pos'\n> /home/bf/bf-build/olingo/HEAD/pgsql.build/../pgsql/src/port/pg_bitutils.c:34\n> [2] size=256 'pg_leftmost_one_pos'\n> /home/bf/bf-build/olingo/HEAD/pgsql.build/../pgsql/src/port/pg_bitutils.c:34\n> These globals were registered at these points:\n> [1]:\n> #0 0x563564b97bf6 in __asan_register_globals\n> (/home/bf/bf-build/olingo/HEAD/pgsql.build/tmp_install/home/bf/bf-build/olingo/HEAD/inst/bin/postgres+0x3e2bf6)\n> (BuildId: e2ff70bf14f342e03f451bba119134a49a50b8b8)\n> #1 0x563564b98d1d in __asan_register_elf_globals\n> (/home/bf/bf-build/olingo/HEAD/pgsql.build/tmp_install/home/bf/bf-build/olingo/HEAD/inst/bin/postgres+0x3e3d1d)\n> (BuildId: e2ff70bf14f342e03f451bba119134a49a50b8b8)\n> #2 0x7fc265c3fe3d in call_init elf/dl-init.c:74:3\n> #3 0x7fc265c3fe3d 
in call_init elf/dl-init.c:26:1\n>\n> [2]:\n> #0 0x563564b97bf6 in __asan_register_globals\n> (/home/bf/bf-build/olingo/HEAD/pgsql.build/tmp_install/home/bf/bf-build/olingo/HEAD/inst/bin/postgres+0x3e2bf6)\n> (BuildId: e2ff70bf14f342e03f451bba119134a49a50b8b8)\n> #1 0x563564b98d1d in __asan_register_elf_globals\n> (/home/bf/bf-build/olingo/HEAD/pgsql.build/tmp_install/home/bf/bf-build/olingo/HEAD/inst/bin/postgres+0x3e3d1d)\n> (BuildId: e2ff70bf14f342e03f451bba119134a49a50b8b8)\n> #2 0x7fc2649847f5 in call_init csu/../csu/libc-start.c:145:3\n> #3 0x7fc2649847f5 in __libc_start_main csu/../csu/libc-start.c:347:5\n\nI'll look at them too.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 7 Mar 2024 15:27:02 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 3:27 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 3:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> >\n> > In addition, olingo and grassquit are showing different kinds of\n> > \"AddressSanitizer: odr-violation\" errors, which I'm not sure what to\n> > make of -- example:\n\nodr-violation seems to refer to One Definition Rule (ODR). According\nto Wikipedia[1]:\n\nThe One Definition Rule (ODR) is an important rule of the C++\nprogramming language that prescribes that classes/structs and\nnon-inline functions cannot have more than one definition in the\nentire program and template and types cannot have more than one\ndefinition by translation unit. It is defined in the ISO C++ Standard\n(ISO/IEC 14882) 2003, at section 3.2. 
Some other programming languages\nhave similar but differently defined rules towards the same objective.\n\nI don't fully understand this concept yet but are these two different\nbuild failures related?\n\nRegards,\n\n[1] https://en.wikipedia.org/wiki/One_Definition_Rule\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 7 Mar 2024 15:49:18 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 1:27 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 3:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Thu, Mar 7, 2024 at 12:59 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > > ... cause \"error: redefinition of typedef 'rt_radix_tree' is a C11\n> > > feature [-Werror,-Wtypedef-redefinition]\"\n> > >\n> > > I'll look in the other templates to see if what they do.\n> >\n> > Their \"declare\" sections have full typedefs. I found it works to leave\n> > out the typedef for the \"define\" section, but I first want to\n> > reproduce the build failure.\n>\n> Right. I've reproduced this build failure on my machine by specifying\n> flags \"-Wtypedef-redefinition -std=gnu99\" to clang. Something the\n> below change seems to fix the problem:\n\nConfirmed, will push shortly.\n\n\n", "msg_date": "Thu, 7 Mar 2024 14:01:02 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 4:01 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 1:27 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Mar 7, 2024 at 3:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Thu, Mar 7, 2024 at 12:59 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > > ... 
cause \"error: redefinition of typedef 'rt_radix_tree' is a C11\n> > > > feature [-Werror,-Wtypedef-redefinition]\"\n> > > >\n> > > > I'll look in the other templates to see if what they do.\n> > >\n> > > Their \"declare\" sections have full typedefs. I found it works to leave\n> > > out the typedef for the \"define\" section, but I first want to\n> > > reproduce the build failure.\n> >\n> > Right. I've reproduced this build failure on my machine by specifying\n> > flags \"-Wtypedef-redefinition -std=gnu99\" to clang. Something the\n> > below change seems to fix the problem:\n>\n> Confirmed, will push shortly.\n\nmamba complained different build errors[1]:\n\n 2740 | fprintf(stderr, \"num_keys = %ld\\\\n\", tree->ctl->num_keys);\n | ~~^ ~~~~~~~~~~~~~~~~~~~\n | | |\n | long int int64 {aka long long int}\n | %lld\n../../../../src/include/lib/radixtree.h:2752:30: error: format '%ld'\nexpects argument of type 'long int', but argument 4 has type 'int64'\n{aka 'long long int'} [-Werror=format=]\n 2752 | fprintf(stderr, \", n%d = %ld\", size_class.fanout,\ntree->ctl->num_nodes[i]);\n | ~~^\n~~~~~~~~~~~~~~~~~~~~~~~\n | |\n |\n | long int\n int64 {aka long long int}\n | %lld\n../../../../src/include/lib/radixtree.h:2755:32: error: format '%ld'\nexpects argument of type 'long int', but argument 3 has type 'int64'\n{aka 'long long int'} [-Werror=format=]\n 2755 | fprintf(stderr, \", leaves = %ld\", tree->ctl->num_leaves);\n | ~~^ ~~~~~~~~~~~~~~~~~~~~~\n | | |\n | long int int64 {aka long long int}\n | %lld\n\nRegards,\n\n[1] https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=mamba&dt=2024-03-07%2006%3A05%3A18\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 7 Mar 2024 16:13:50 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 2:14 PM Masahiko Sawada <sawada.mshk@gmail.com> 
wrote:\n>\n> On Thu, Mar 7, 2024 at 4:01 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Thu, Mar 7, 2024 at 1:27 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Thu, Mar 7, 2024 at 3:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > >\n> > > > On Thu, Mar 7, 2024 at 12:59 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > > > > ... cause \"error: redefinition of typedef 'rt_radix_tree' is a C11\n> > > > > feature [-Werror,-Wtypedef-redefinition]\"\n> > > > >\n> > > > > I'll look in the other templates to see if what they do.\n> > > >\n> > > > Their \"declare\" sections have full typedefs. I found it works to leave\n> > > > out the typedef for the \"define\" section, but I first want to\n> > > > reproduce the build failure.\n> > >\n> > > Right. I've reproduced this build failure on my machine by specifying\n> > > flags \"-Wtypedef-redefinition -std=gnu99\" to clang. Something the\n> > > below change seems to fix the problem:\n> >\n> > Confirmed, will push shortly.\n>\n> mamba complained different build errors[1]:\n>\n> 2740 | fprintf(stderr, \"num_keys = %ld\\\\n\", tree->ctl->num_keys);\n> | ~~^ ~~~~~~~~~~~~~~~~~~~\n> | | |\n> | long int int64 {aka long long int}\n> | %lld\n> ../../../../src/include/lib/radixtree.h:2752:30: error: format '%ld'\n> expects argument of type 'long int', but argument 4 has type 'int64'\n> {aka 'long long int'} [-Werror=format=]\n> 2752 | fprintf(stderr, \", n%d = %ld\", size_class.fanout,\n> tree->ctl->num_nodes[i]);\n> | ~~^\n> ~~~~~~~~~~~~~~~~~~~~~~~\n> | |\n> |\n> | long int\n> int64 {aka long long int}\n> | %lld\n> ../../../../src/include/lib/radixtree.h:2755:32: error: format '%ld'\n> expects argument of type 'long int', but argument 3 has type 'int64'\n> {aka 'long long int'} [-Werror=format=]\n> 2755 | fprintf(stderr, \", leaves = %ld\", tree->ctl->num_leaves);\n> | ~~^ ~~~~~~~~~~~~~~~~~~~~~\n> | | |\n> | long int int64 {aka long long int}\n> | %lld\n>\n> Regards,\n>\n> [1] 
https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=mamba&dt=2024-03-07%2006%3A05%3A18\n\nYeah, the attached fixes it for me.", "msg_date": "Thu, 7 Mar 2024 14:21:03 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 4:21 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 2:14 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Mar 7, 2024 at 4:01 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Thu, Mar 7, 2024 at 1:27 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > >\n> > > > On Thu, Mar 7, 2024 at 3:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > > >\n> > > > > On Thu, Mar 7, 2024 at 12:59 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > > > > ... cause \"error: redefinition of typedef 'rt_radix_tree' is a C11\n> > > > > > feature [-Werror,-Wtypedef-redefinition]\"\n> > > > > >\n> > > > > > I'll look in the other templates to see if what they do.\n> > > > >\n> > > > > Their \"declare\" sections have full typedefs. I found it works to leave\n> > > > > out the typedef for the \"define\" section, but I first want to\n> > > > > reproduce the build failure.\n> > > >\n> > > > Right. I've reproduced this build failure on my machine by specifying\n> > > > flags \"-Wtypedef-redefinition -std=gnu99\" to clang. 
Something the\n> > > > below change seems to fix the problem:\n> > >\n> > > Confirmed, will push shortly.\n> >\n> > mamba complained different build errors[1]:\n> >\n> > 2740 | fprintf(stderr, \"num_keys = %ld\\\\n\", tree->ctl->num_keys);\n> > | ~~^ ~~~~~~~~~~~~~~~~~~~\n> > | | |\n> > | long int int64 {aka long long int}\n> > | %lld\n> > ../../../../src/include/lib/radixtree.h:2752:30: error: format '%ld'\n> > expects argument of type 'long int', but argument 4 has type 'int64'\n> > {aka 'long long int'} [-Werror=format=]\n> > 2752 | fprintf(stderr, \", n%d = %ld\", size_class.fanout,\n> > tree->ctl->num_nodes[i]);\n> > | ~~^\n> > ~~~~~~~~~~~~~~~~~~~~~~~\n> > | |\n> > |\n> > | long int\n> > int64 {aka long long int}\n> > | %lld\n> > ../../../../src/include/lib/radixtree.h:2755:32: error: format '%ld'\n> > expects argument of type 'long int', but argument 3 has type 'int64'\n> > {aka 'long long int'} [-Werror=format=]\n> > 2755 | fprintf(stderr, \", leaves = %ld\", tree->ctl->num_leaves);\n> > | ~~^ ~~~~~~~~~~~~~~~~~~~~~\n> > | | |\n> > | long int int64 {aka long long int}\n> > | %lld\n> >\n> > Regards,\n> >\n> > [1] https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=mamba&dt=2024-03-07%2006%3A05%3A18\n>\n> Yeah, the attached fixes it for me.\n\nThanks, LGTM.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 7 Mar 2024 16:32:21 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 1:49 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> odr-violation seems to refer to One Definition Rule (ODR). 
According\n> to Wikipedia[1]:\n>\n> The One Definition Rule (ODR) is an important rule of the C++\n> programming language that prescribes that classes/structs and\n> non-inline functions cannot have more than one definition in the\n> entire program and template and types cannot have more than one\n> definition by translation unit. It is defined in the ISO C++ Standard\n> (ISO/IEC 14882) 2003, at section 3.2. Some other programming languages\n> have similar but differently defined rules towards the same objective.\n>\n> I don't fully understand this concept yet but are these two different\n> build failures related?\n\nI thought it may have something to do with the prerequisite commit\nthat moved some symbols from bitmapset.c to .h:\n\n/* Select appropriate bit-twiddling functions for bitmap word size */\n#if BITS_PER_BITMAPWORD == 32\n#define bmw_leftmost_one_pos(w) pg_leftmost_one_pos32(w)\n#define bmw_rightmost_one_pos(w) pg_rightmost_one_pos32(w)\n#define bmw_popcount(w) pg_popcount32(w)\n#elif BITS_PER_BITMAPWORD == 64\n#define bmw_leftmost_one_pos(w) pg_leftmost_one_pos64(w)\n#define bmw_rightmost_one_pos(w) pg_rightmost_one_pos64(w)\n#define bmw_popcount(w) pg_popcount64(w)\n#else\n#error \"invalid BITS_PER_BITMAPWORD\"\n#endif\n\n...but olingo's error seems strange to me, because it is complaining\nof pg_leftmost_one_pos, which refers to the lookup table in\npg_bitutils.c -- I thought all buildfarm members used the bitscan\ninstructions.\n\ngrassquit is complaining of pg_popcount64, which is a global function,\nalso in pg_bitutils.c. 
Not sure what to make of this, since we're just\npointing symbols at things which should have a single definition...\n\n\n", "msg_date": "Thu, 7 Mar 2024 15:18:11 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 1:19 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> In addition, olingo and grassquit are showing different kinds of\n> \"AddressSanitizer: odr-violation\" errors, which I'm not sure what to\n> make of -- example:\n\nThis might be relevant:\n\n$ git grep 'link_with: pgport_srv'\nsrc/test/modules/test_radixtree/meson.build: link_with: pgport_srv,\n\nNo other test module uses this directive, and indeed, removing this\nstill builds fine for me. Thoughts?\n\n\n", "msg_date": "Thu, 7 Mar 2024 16:37:10 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 6:37 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 1:19 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > In addition, olingo and grassquit are showing different kinds of\n> > \"AddressSanitizer: odr-violation\" errors, which I'm not sure what to\n> > make of -- example:\n>\n> This might be relevant:\n>\n> $ git grep 'link_with: pgport_srv'\n> src/test/modules/test_radixtree/meson.build: link_with: pgport_srv,\n>\n> No other test module uses this directive, and indeed, removing this\n> still builds fine for me. Thoughts?\n\nYeah, it could be the culprit. The test_radixtree/meson.build is the\nsole extension that explicitly specifies a link with pgport_srv. 
I\nthink we can get rid of it as I've also confirmed the build still fine\neven without it.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 7 Mar 2024 18:46:48 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 4:47 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 6:37 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > $ git grep 'link_with: pgport_srv'\n> > src/test/modules/test_radixtree/meson.build: link_with: pgport_srv,\n> >\n> > No other test module uses this directive, and indeed, removing this\n> > still builds fine for me. Thoughts?\n>\n> Yeah, it could be the culprit. The test_radixtree/meson.build is the\n> sole extension that explicitly specifies a link with pgport_srv. I\n> think we can get rid of it as I've also confirmed the build still fine\n> even without it.\n\nolingo and grassquit have turned green, so that must have been it.\n\n\n", "msg_date": "Thu, 7 Mar 2024 18:06:13 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 8:06 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 4:47 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Mar 7, 2024 at 6:37 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > $ git grep 'link_with: pgport_srv'\n> > > src/test/modules/test_radixtree/meson.build: link_with: pgport_srv,\n> > >\n> > > No other test module uses this directive, and indeed, removing this\n> > > still builds fine for me. Thoughts?\n> >\n> > Yeah, it could be the culprit. The test_radixtree/meson.build is the\n> > sole extension that explicitly specifies a link with pgport_srv. 
I\n> > think we can get rid of it as I've also confirmed the build still fine\n> > even without it.\n>\n> olingo and grassquit have turned green, so that must have been it.\n\nCool!\n\nI've attached the remaining patches for CI. I've made some minor\nchanges in separate patches and drafted the commit message for\ntidstore patch.\n\nWhile reviewing the tidstore code, I thought that it would be more\nappropriate to place tidstore.c under src/backend/lib instead of\nsrc/backend/common/access since the tidstore is no longer implemented\nonly for heap or other access methods, and it might also be used by\nexecutor nodes in the future. What do you think?\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Fri, 8 Mar 2024 00:34:48 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 8:06 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 4:47 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Mar 7, 2024 at 6:37 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > $ git grep 'link_with: pgport_srv'\n> > > src/test/modules/test_radixtree/meson.build: link_with: pgport_srv,\n> > >\n> > > No other test module uses this directive, and indeed, removing this\n> > > still builds fine for me. Thoughts?\n> >\n> > Yeah, it could be the culprit. The test_radixtree/meson.build is the\n> > sole extension that explicitly specifies a link with pgport_srv. 
I\n> > think we can get rid of it as I've also confirmed the build still fine\n> > even without it.\n>\n> olingo and grassquit have turned green, so that must have been it.\n\nfairywren is complaining another build failure:\n\n[1931/2156] \"gcc\" -o\nsrc/test/modules/test_radixtree/test_radixtree.dll\nsrc/test/modules/test_radixtree/test_radixtree.dll.p/win32ver.obj\nsrc/test/modules/test_radixtree/test_radixtree.dll.p/test_radixtree.c.obj\n\"-Wl,--allow-shlib-undefined\" \"-shared\" \"-Wl,--start-group\"\n\"-Wl,--out-implib=src/test\\\\modules\\\\test_radixtree\\\\test_radixtree.dll.a\"\n\"-Wl,--stack,4194304\" \"-Wl,--allow-multiple-definition\"\n\"-Wl,--disable-auto-import\" \"-fvisibility=hidden\"\n\"C:/tools/nmsys64/home/pgrunner/bf/root/HEAD/pgsql.build/src/backend/libpostgres.exe.a\"\n\"-pthread\" \"C:/tools/nmsys64/ucrt64/bin/../lib/libssl.dll.a\"\n\"C:/tools/nmsys64/ucrt64/bin/../lib/libcrypto.dll.a\"\n\"C:/tools/nmsys64/ucrt64/bin/../lib/libz.dll.a\" \"-lws2_32\" \"-lm\"\n\"-lkernel32\" \"-luser32\" \"-lgdi32\" \"-lwinspool\" \"-lshell32\" \"-lole32\"\n\"-loleaut32\" \"-luuid\" \"-lcomdlg32\" \"-ladvapi32\" \"-Wl,--end-group\"\nFAILED: src/test/modules/test_radixtree/test_radixtree.dll\n\"gcc\" -o src/test/modules/test_radixtree/test_radixtree.dll\nsrc/test/modules/test_radixtree/test_radixtree.dll.p/win32ver.obj\nsrc/test/modules/test_radixtree/test_radixtree.dll.p/test_radixtree.c.obj\n\"-Wl,--allow-shlib-undefined\" \"-shared\" \"-Wl,--start-group\"\n\"-Wl,--out-implib=src/test\\\\modules\\\\test_radixtree\\\\test_radixtree.dll.a\"\n\"-Wl,--stack,4194304\" \"-Wl,--allow-multiple-definition\"\n\"-Wl,--disable-auto-import\" \"-fvisibility=hidden\"\n\"C:/tools/nmsys64/home/pgrunner/bf/root/HEAD/pgsql.build/src/backend/libpostgres.exe.a\"\n\"-pthread\" \"C:/tools/nmsys64/ucrt64/bin/../lib/libssl.dll.a\"\n\"C:/tools/nmsys64/ucrt64/bin/../lib/libcrypto.dll.a\"\n\"C:/tools/nmsys64/ucrt64/bin/../lib/libz.dll.a\" \"-lws2_32\" \"-lm\"\n\"-lkernel32\" 
\"-luser32\" \"-lgdi32\" \"-lwinspool\" \"-lshell32\" \"-lole32\"\n\"-loleaut32\" \"-luuid\" \"-lcomdlg32\" \"-ladvapi32\" \"-Wl,--end-group\"\nC:/tools/nmsys64/ucrt64/bin/../lib/gcc/x86_64-w64-mingw32/12.2.0/../../../../x86_64-w64-mingw32/bin/ld.exe:\nsrc/test/modules/test_radixtree/test_radixtree.dll.p/test_radixtree.c.obj:test_radixtree:(.rdata$.refptr.pg_popcount64[.refptr.pg_popcount64]+0x0):\nundefined reference to `pg_popcount64'\n\nIt looks like it requires a link with pgport_srv but I'm not sure. It\nseems that the recent commit 1f1d73a8b breaks CI, Windows - Server\n2019, VS 2019 - Meson & ninja, too.\n\nRegards,\n\n[1] https://buildfarm.postgresql.org/cgi-bin/show_log.pl?nm=fairywren&dt=2024-03-07%2012%3A53%3A20\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 8 Mar 2024 01:14:41 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 11:15 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> It looks like it requires a link with pgport_srv but I'm not sure. It\n> seems that the recent commit 1f1d73a8b breaks CI, Windows - Server\n> 2019, VS 2019 - Meson & ninja, too.\n\nUnfortunately, none of the Windows animals happened to run both after\nthe initial commit and before removing the (seemingly useless on our\ndaily platfoms) link. I'll confirm on my own CI branch in a few\nminutes.\n\n\n", "msg_date": "Fri, 8 Mar 2024 08:04:39 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 8, 2024 at 10:04 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 11:15 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > It looks like it requires a link with pgport_srv but I'm not sure. 
It\n> > seems that the recent commit 1f1d73a8b breaks CI, Windows - Server\n> > 2019, VS 2019 - Meson & ninja, too.\n>\n> Unfortunately, none of the Windows animals happened to run both after\n> the initial commit and before removing the (seemingly useless on our\n> daily platfoms) link. I'll confirm on my own CI branch in a few\n> minutes.\n\nYesterday I've confirmed the something like the below fixes the\nproblem happened in Windows CI:\n\n--- a/src/test/modules/test_radixtree/meson.build\n+++ b/src/test/modules/test_radixtree/meson.build\n@@ -12,6 +12,7 @@ endif\n\n test_radixtree = shared_module('test_radixtree',\n test_radixtree_sources,\n+ link_with: host_system == 'windows' ? pgport_srv : [],\n kwargs: pg_test_mod_args,\n )\n test_install_libs += test_radixtree\n\nBut I'm not sure it's the right fix especially because I guess it\ncould raise \"AddressSanitizer: odr-violation\" error on Windows.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 8 Mar 2024 10:08:40 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 8, 2024 at 8:09 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> Yesterday I've confirmed the something like the below fixes the\n> problem happened in Windows CI:\n\nGlad you shared before I went and did it.\n\n> --- a/src/test/modules/test_radixtree/meson.build\n> +++ b/src/test/modules/test_radixtree/meson.build\n> @@ -12,6 +12,7 @@ endif\n>\n> test_radixtree = shared_module('test_radixtree',\n> test_radixtree_sources,\n> + link_with: host_system == 'windows' ? pgport_srv : [],\n\nI don't see any similar coding elsewhere, so that leaves me wondering\nif we're missing something. 
On the other hand, maybe no test modules\nuse files in src/port ...\n\n> kwargs: pg_test_mod_args,\n> )\n> test_install_libs += test_radixtree\n>\n> But I'm not sure it's the right fix especially because I guess it\n> could raise \"AddressSanitizer: odr-violation\" error on Windows.\n\nWell, it's now at zero definitions that it can see, so I imagine it's\npossible that adding the above would not cause more than one. In any\ncase, we might not know since as far as I can tell the MSVC animals\ndon't have address sanitizer. I'll look around some more, and if I\ndon't get any revelations, I guess we should go with the above.\n\n\n", "msg_date": "Fri, 8 Mar 2024 08:31:37 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 8, 2024 at 8:09 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> Yesterday I've confirmed the something like the below fixes the\n> problem happened in Windows CI:\n>\n> --- a/src/test/modules/test_radixtree/meson.build\n> +++ b/src/test/modules/test_radixtree/meson.build\n> @@ -12,6 +12,7 @@ endif\n>\n> test_radixtree = shared_module('test_radixtree',\n> test_radixtree_sources,\n> + link_with: host_system == 'windows' ? 
pgport_srv : [],\n> kwargs: pg_test_mod_args,\n> )\n> test_install_libs += test_radixtree\n\npgport_srv is for backend, shared libraries should be using pgport_shlib\n\nFurther, the top level meson.build has:\n\n# all shared libraries not part of the backend should depend on this\nfrontend_shlib_code = declare_dependency(\n include_directories: [postgres_inc],\n link_with: [common_shlib, pgport_shlib],\n sources: generated_headers,\n dependencies: [shlib_code, os_deps, libintl],\n)\n\n...but the only things that declare needing frontend_shlib_code are in\nsrc/interfaces/.\n\nIn any case, I'm trying it in CI branch with pgport_shlib now.\n\n\n", "msg_date": "Fri, 8 Mar 2024 09:53:43 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 8, 2024 at 9:53 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Mar 8, 2024 at 8:09 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > Yesterday I've confirmed the something like the below fixes the\n> > problem happened in Windows CI:\n> >\n> > --- a/src/test/modules/test_radixtree/meson.build\n> > +++ b/src/test/modules/test_radixtree/meson.build\n> > @@ -12,6 +12,7 @@ endif\n> >\n> > test_radixtree = shared_module('test_radixtree',\n> > test_radixtree_sources,\n> > + link_with: host_system == 'windows' ? 
pgport_srv : [],\n> > kwargs: pg_test_mod_args,\n> > )\n> > test_install_libs += test_radixtree\n>\n> pgport_srv is for backend, shared libraries should be using pgport_shlib\n\n> In any case, I'm trying it in CI branch with pgport_shlib now.\n\nThat seems to work, so I'll push that just to get things green again.\n\n\n", "msg_date": "Fri, 8 Mar 2024 10:22:41 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 7, 2024 at 10:35 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I've attached the remaining patches for CI. I've made some minor\n> changes in separate patches and drafted the commit message for\n> tidstore patch.\n>\n> While reviewing the tidstore code, I thought that it would be more\n> appropriate to place tidstore.c under src/backend/lib instead of\n> src/backend/common/access since the tidstore is no longer implemented\n> only for heap or other access methods, and it might also be used by\n> executor nodes in the future. What do you think?\n\nThat's a heck of a good question. I don't think src/backend/lib is\nright -- it seems that's for general-purpose data structures.\nSomething like backend/utils is also too general.\nsrc/backend/access/common has things for tuple descriptors, toast,\nsessions, and I don't think tidstore is out of place here. I'm not\nsure there's a better place, but I could be convinced otherwise.\n\nv68-0001:\n\nI'm not sure if commit messages are much a subject of review, and it's\nup to the committer, but I'll share a couple comments just as\nsomething to think about, not something I would ask you to change: I\nthink it's a bit distracting that the commit message talks about the\njustification to use it for vacuum. Let's save that for the commit\nwith actual vacuum changes. 
Also, I suspect saying there are a \"wide\nrange\" of uses is over-selling it a bit, and that paragraph is a bit\nawkward aside from that.\n\n+ /* Collect TIDs extracted from the key-value pair */\n+ result->num_offsets = 0;\n+\n\nThis comment has nothing at all to do with this line. If the comment\nis for several lines following, some of which are separated by blank\nlines, there should be a blank line after the comment. Also, why isn't\ntidstore_iter_extract_tids() responsible for setting that to zero?\n\n+ ts->context = CurrentMemoryContext;\n\nAs far as I can tell, this member is never accessed again -- am I\nmissing something?\n\n+ /* DSA for tidstore will be detached at the end of session */\n\nNo other test module pins the mapping, but that doesn't necessarily\nmean it's wrong. Is there some advantage over explicitly detaching?\n\n+-- Add tids in random order.\n\nI don't see any randomization here. I do remember adding row_number to\nremove whitespace in the output, but I don't remember a random order.\nOn that subject, the row_number was an easy trick to avoid extra\nwhitespace, but maybe we should just teach the setting function to\nreturn blocknumber rather than null?\n\n+Datum\n+tidstore_create(PG_FUNCTION_ARGS)\n+{\n...\n+ tidstore = TidStoreCreate(max_bytes, dsa);\n\n+Datum\n+tidstore_set_block_offsets(PG_FUNCTION_ARGS)\n+{\n....\n+ TidStoreSetBlockOffsets(tidstore, blkno, offs, noffs);\n\nThese names are too similar. Maybe the test module should do\ns/tidstore_/test_/ or similar.\n\n+/* Sanity check if we've called tidstore_create() */\n+static void\n+check_tidstore_available(void)\n+{\n+ if (tidstore == NULL)\n+ elog(ERROR, \"tidstore is not initialized\");\n+}\n\nI don't find this very helpful. If a developer wiped out the create\ncall, wouldn't the test crash and burn pretty obviously?\n\nIn general, the .sql file is still very hard-coded. Functions are\ncreated that contain a VALUES statement. Maybe it's okay for now, but\nwanted to mention it. 
Ideally, we'd have some randomized tests,\nwithout having to display it. That could be in addition to (not\nreplacing) the small tests we have that display input. (see below)\n\n\nv68-0002:\n\n@@ -329,6 +381,13 @@ TidStoreIsMember(TidStore *ts, ItemPointer tid)\n\n ret = (page->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0;\n\n+#ifdef TIDSTORE_DEBUG\n+ if (!TidStoreIsShared(ts))\n+ {\n+ bool ret_debug = ts_debug_is_member(ts, tid);;\n+ Assert(ret == ret_debug);\n+ }\n+#endif\n\nThis only checking the case where we haven't returned already. In particular...\n\n+ /* no entry for the blk */\n+ if (page == NULL)\n+ return false;\n+\n+ wordnum = WORDNUM(off);\n+ bitnum = BITNUM(off);\n+\n+ /* no bitmap for the off */\n+ if (wordnum >= page->nwords)\n+ return false;\n\n...these results are not checked.\n\nMore broadly, it seems like the test module should be able to test\neverything that the debug-build array would complain about. Including\nordered iteration. This may require first saving our test input to a\ntable. We could create a cursor on a query that fetches the ordered\ninput from the table and verifies that the tid store iterate produces\nthe same ordered set, maybe with pl/pgSQL. Or something like that.\nSeems like not a whole lot of work. I can try later in the week, if\nyou like.\n\nv68-0005/6 look ready to squash\n\nv68-0008 - I'm not a fan of captilizing short comment fragments. I use\nthe style of either: short lower-case phrases, or full sentences\nincluding capitalization, correct grammar and period. 
I see these two\nstyles all over the code base, as appropriate.\n\n+ /* Remain attached until end of backend */\n\nWe'll probably want this comment, if in fact we want this behavior.\n\n+ /*\n+ * Note that funcctx->call_cntr is incremented in SRF_RETURN_NEXT\n+ * before return.\n+ */\n\nI'm not sure what this is trying to say or why it's relevant, since\nit's been a while since I've written a SRF in C.\n\nThat's all I have for now, and I haven't looked at the vacuum changes this time.\n\n\n", "msg_date": "Mon, 11 Mar 2024 10:19:52 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Feb 16, 2024 at 10:05 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Feb 15, 2024 at 8:26 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > v61-0007: Runtime-embeddable tids -- Optional for v17, but should\n> > reduce memory regressions, so should be considered. Up to 3 tids can\n> > be stored in the last level child pointer. It's not polished, but I'll\n> > only proceed with that if we think we need this. \"flags\" iis called\n> > that because it could hold tidbitmap.c booleans (recheck, lossy) in\n> > the future, in addition to reserving space for the pointer tag. Note:\n> > I hacked the tests to only have 2 offsets per block to demo, but of\n> > course both paths should be tested.\n>\n> Interesting. I've run the same benchmark tests we did[1][2] (the\n> median of 3 runs):\n[found a big speed-up where we don't expect one]\n\nI tried to reproduce this (similar patch, but rebased on top of a bug\nyou recently fixed (possibly related?) 
-- attached, and also shows one\nway to address some lack of coverage in the debug build, for as long\nas we test that with CI).\n\nFortunately I cannot see a difference, so I believe it's not affecting\nthe case in this test all, as expected:\n\nv68:\n\nINFO: finished vacuuming \"john.public.test\": index scans: 1\npages: 0 removed, 442478 remain, 88478 scanned (20.00% of total)\ntuples: 19995999 removed, 80003979 remain, 0 are dead but not yet removable\nremovable cutoff: 770, which was 0 XIDs old when operation ended\nfrozen: 0 pages from table (0.00% of total) had 0 tuples frozen\nindex scan needed: 88478 pages from table (20.00% of total) had\n19995999 dead item identifiers removed\nindex \"test_x_idx\": pages: 274194 in total, 54822 newly deleted, 54822\ncurrently deleted, 0 reusable\navg read rate: 620.356 MB/s, avg write rate: 124.105 MB/s\nbuffer usage: 758236 hits, 274196 misses, 54854 dirtied\nWAL usage: 2 records, 0 full page images, 425 bytes\n\nsystem usage: CPU: user: 3.74 s, system: 0.68 s, elapsed: 4.45 s\nsystem usage: CPU: user: 3.02 s, system: 0.42 s, elapsed: 3.47 s\nsystem usage: CPU: user: 3.09 s, system: 0.38 s, elapsed: 3.49 s\nsystem usage: CPU: user: 3.00 s, system: 0.43 s, elapsed: 3.45 s\n\nv68 + emb values (that cannot be used because > 3 tids per block):\n\nINFO: finished vacuuming \"john.public.test\": index scans: 1\npages: 0 removed, 442478 remain, 88478 scanned (20.00% of total)\ntuples: 19995999 removed, 80003979 remain, 0 are dead but not yet removable\nremovable cutoff: 775, which was 0 XIDs old when operation ended\nfrozen: 0 pages from table (0.00% of total) had 0 tuples frozen\nindex scan needed: 88478 pages from table (20.00% of total) had\n19995999 dead item identifiers removed\nindex \"test_x_idx\": pages: 274194 in total, 54822 newly deleted, 54822\ncurrently deleted, 0 reusable\navg read rate: 570.808 MB/s, avg write rate: 114.192 MB/s\nbuffer usage: 758236 hits, 274196 misses, 54854 dirtied\nWAL usage: 2 records, 0 
full page images, 425 bytes\n\nsystem usage: CPU: user: 3.11 s, system: 0.62 s, elapsed: 3.75 s\nsystem usage: CPU: user: 3.04 s, system: 0.41 s, elapsed: 3.46 s\nsystem usage: CPU: user: 3.05 s, system: 0.41 s, elapsed: 3.47 s\nsystem usage: CPU: user: 3.04 s, system: 0.43 s, elapsed: 3.49 s\n\nI'll continue polishing the runtime-embeddable values patch as time\npermits, for later consideration.", "msg_date": "Mon, 11 Mar 2024 14:32:20 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 11, 2024 at 12:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 7, 2024 at 10:35 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I've attached the remaining patches for CI. I've made some minor\n> > changes in separate patches and drafted the commit message for\n> > tidstore patch.\n> >\n> > While reviewing the tidstore code, I thought that it would be more\n> > appropriate to place tidstore.c under src/backend/lib instead of\n> > src/backend/common/access since the tidstore is no longer implemented\n> > only for heap or other access methods, and it might also be used by\n> > executor nodes in the future. What do you think?\n>\n> That's a heck of a good question. I don't think src/backend/lib is\n> right -- it seems that's for general-purpose data structures.\n> Something like backend/utils is also too general.\n> src/backend/access/common has things for tuple descriptors, toast,\n> sessions, and I don't think tidstore is out of place here. I'm not\n> sure there's a better place, but I could be convinced otherwise.\n\nYeah, I agreed that src/backend/lib seems not to be the place for\ntidstore. Let's keep it in src/backend/access/common. 
If others think\ndifferently, we can move it later.\n\n>\n> v68-0001:\n>\n> I'm not sure if commit messages are much a subject of review, and it's\n> up to the committer, but I'll share a couple comments just as\n> something to think about, not something I would ask you to change: I\n> think it's a bit distracting that the commit message talks about the\n> justification to use it for vacuum. Let's save that for the commit\n> with actual vacuum changes. Also, I suspect saying there are a \"wide\n> range\" of uses is over-selling it a bit, and that paragraph is a bit\n> awkward aside from that.\n\nThank you for the comment, and I agreed. I've updated the commit message.\n\n>\n> + /* Collect TIDs extracted from the key-value pair */\n> + result->num_offsets = 0;\n> +\n>\n> This comment has nothing at all to do with this line. If the comment\n> is for several lines following, some of which are separated by blank\n> lines, there should be a blank line after the comment. Also, why isn't\n> tidstore_iter_extract_tids() responsible for setting that to zero?\n\nAgreed, fixed.\n\nI also updated this part so we set result->blkno in\ntidstore_iter_extract_tids() too, which seems more readable.\n\n>\n> + ts->context = CurrentMemoryContext;\n>\n> As far as I can tell, this member is never accessed again -- am I\n> missing something?\n\nYou're right. It was used to re-create the tidstore in the same\ncontext again while resetting it, but we no longer support the reset\nAPI. Considering it again, would it be better to allocate the iterator\nstruct in the same context as we store the tidstore struct?\n\n>\n> + /* DSA for tidstore will be detached at the end of session */\n>\n> No other test module pins the mapping, but that doesn't necessarily\n> mean it's wrong. 
Is there some advantage over explicitly detaching?\n\nOne small benefit of not explicitly detaching dsa_area in\ntidstore_destroy() would be simplicity; IIUC if we want to do that, we\nneed to remember the dsa_area using (for example) a static variable,\nand free it if it's non-NULL. I've implemented this idea in the\nattached patch.\n\n>\n> +-- Add tids in random order.\n>\n> I don't see any randomization here. I do remember adding row_number to\n> remove whitespace in the output, but I don't remember a random order.\n> On that subject, the row_number was an easy trick to avoid extra\n> whitespace, but maybe we should just teach the setting function to\n> return blocknumber rather than null?\n\nGood idea, fixed.\n\n>\n> +Datum\n> +tidstore_create(PG_FUNCTION_ARGS)\n> +{\n> ...\n> + tidstore = TidStoreCreate(max_bytes, dsa);\n>\n> +Datum\n> +tidstore_set_block_offsets(PG_FUNCTION_ARGS)\n> +{\n> ....\n> + TidStoreSetBlockOffsets(tidstore, blkno, offs, noffs);\n>\n> These names are too similar. Maybe the test module should do\n> s/tidstore_/test_/ or similar.\n\nAgreed.\n\n>\n> +/* Sanity check if we've called tidstore_create() */\n> +static void\n> +check_tidstore_available(void)\n> +{\n> + if (tidstore == NULL)\n> + elog(ERROR, \"tidstore is not initialized\");\n> +}\n>\n> I don't find this very helpful. If a developer wiped out the create\n> call, wouldn't the test crash and burn pretty obviously?\n\nRemoved.\n\n>\n> In general, the .sql file is still very hard-coded. Functions are\n> created that contain a VALUES statement. Maybe it's okay for now, but\n> wanted to mention it. Ideally, we'd have some randomized tests,\n> without having to display it. That could be in addition to (not\n> replacing) the small tests we have that display input. 
(see below)\n>\n\nAgreed to add randomized tests in addition to the existing tests.\n\n>\n> v68-0002:\n>\n> @@ -329,6 +381,13 @@ TidStoreIsMember(TidStore *ts, ItemPointer tid)\n>\n> ret = (page->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0;\n>\n> +#ifdef TIDSTORE_DEBUG\n> + if (!TidStoreIsShared(ts))\n> + {\n> + bool ret_debug = ts_debug_is_member(ts, tid);;\n> + Assert(ret == ret_debug);\n> + }\n> +#endif\n>\n> This only checking the case where we haven't returned already. In particular...\n>\n> + /* no entry for the blk */\n> + if (page == NULL)\n> + return false;\n> +\n> + wordnum = WORDNUM(off);\n> + bitnum = BITNUM(off);\n> +\n> + /* no bitmap for the off */\n> + if (wordnum >= page->nwords)\n> + return false;\n>\n> ...these results are not checked.\n>\n> More broadly, it seems like the test module should be able to test\n> everything that the debug-build array would complain about. Including\n> ordered iteration. This may require first saving our test input to a\n> table. We could create a cursor on a query that fetches the ordered\n> input from the table and verifies that the tid store iterate produces\n> the same ordered set, maybe with pl/pgSQL. Or something like that.\n> Seems like not a whole lot of work. I can try later in the week, if\n> you like.\n\nSounds a good idea. In fact, if there are some bugs in tidstore, it's\nlikely that even initdb would fail in practice. However, it's a very\ngood idea that we can test the tidstore anyway with such a check\nwithout a debug-build array.\n\nOr as another idea, I wonder if we could keep the debug-build array in\nsome form. For example, we use the array with the particular build\nflag and set a BF animal for that. That way, we can test the tidstore\nin more real cases.\n\n>\n> v68-0005/6 look ready to squash\n\nDone.\n\n>\n> v68-0008 - I'm not a fan of captilizing short comment fragments. 
I use\n> the style of either: short lower-case phrases, or full sentences\n> including capitalization, correct grammar and period. I see these two\n> styles all over the code base, as appropriate.\n\nAgreed.\n\n>\n> + /* Remain attached until end of backend */\n>\n> We'll probably want this comment, if in fact we want this behavior.\n\nKept it.\n\n>\n> + /*\n> + * Note that funcctx->call_cntr is incremented in SRF_RETURN_NEXT\n> + * before return.\n> + */\n>\n> I'm not sure what this is trying to say or why it's relevant, since\n> it's been a while since I've written a SRF in C.\n\nI wanted to say is that we cannot do like:\n\nSRF_RETURN_NEXT(funcctx, PointerGetDatum(&(tids[funcctx->call_cntr])));\n\nbecause funcctx->call_cntr is incremented *before* return and\ntherefore we will end up accessing the index out of range. I've took\nsome time to realize this fact before.\n\n> That's all I have for now, and I haven't looked at the vacuum changes this time.\n\nThank you for the comments!\n\nIn the latest (v69) patch:\n\n- squashed v68-0005 and v68-0006 patches.\n- removed most of the changes in v68-0007 patch.\n- addressed above review comments in v69-0002 patch.\n- v69-0003, 0004, and 0005 are miscellaneous updates.\n\nAs for renaming TidStore to TIDStore, I dropped the patch for now\nsince it seems we're using \"Tid\" in some function names and variable\nnames. 
If we want to update it, we can do that later.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 11 Mar 2024 17:13:17 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 11, 2024 at 5:13 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> In the latest (v69) patch:\n>\n> - squashed v68-0005 and v68-0006 patches.\n> - removed most of the changes in v68-0007 patch.\n> - addressed above review comments in v69-0002 patch.\n> - v69-0003, 0004, and 0005 are miscellaneous updates.\n\nSince the v69 conflicts with the current HEAD, I've rebased them. In\naddition, v70-0008 is the new patch, which cleans up the vacuum\nintegration patch.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 12 Mar 2024 11:20:18 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 11, 2024 at 3:13 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Mar 11, 2024 at 12:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Thu, Mar 7, 2024 at 10:35 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> > + ts->context = CurrentMemoryContext;\n> >\n> > As far as I can tell, this member is never accessed again -- am I\n> > missing something?\n>\n> You're right. It was used to re-create the tidstore in the same\n> context again while resetting it, but we no longer support the reset\n> API. Considering it again, would it be better to allocate the iterator\n> struct in the same context as we store the tidstore struct?\n\nThat makes sense.\n\n> > + /* DSA for tidstore will be detached at the end of session */\n> >\n> > No other test module pins the mapping, but that doesn't necessarily\n> > mean it's wrong. 
Is there some advantage over explicitly detaching?\n>\n> One small benefit of not explicitly detaching dsa_area in\n> tidstore_destroy() would be simplicity; IIUC if we want to do that, we\n> need to remember the dsa_area using (for example) a static variable,\n> and free it if it's non-NULL. I've implemented this idea in the\n> attached patch.\n\nOkay, I don't have a strong preference at this point.\n\n> > +-- Add tids in random order.\n> >\n> > I don't see any randomization here. I do remember adding row_number to\n> > remove whitespace in the output, but I don't remember a random order.\n> > On that subject, the row_number was an easy trick to avoid extra\n> > whitespace, but maybe we should just teach the setting function to\n> > return blocknumber rather than null?\n>\n> Good idea, fixed.\n\n+ test_set_block_offsets\n+------------------------\n+ 2147483647\n+ 0\n+ 4294967294\n+ 1\n+ 4294967295\n\nHmm, was the earlier comment about randomness referring to this? I'm\nnot sure what other regression tests do in these cases, or how\nrelibale this is. If this is a problem we could simply insert this\nresult into a temp table so it's not output.\n\n> > +Datum\n> > +tidstore_create(PG_FUNCTION_ARGS)\n> > +{\n> > ...\n> > + tidstore = TidStoreCreate(max_bytes, dsa);\n> >\n> > +Datum\n> > +tidstore_set_block_offsets(PG_FUNCTION_ARGS)\n> > +{\n> > ....\n> > + TidStoreSetBlockOffsets(tidstore, blkno, offs, noffs);\n> >\n> > These names are too similar. Maybe the test module should do\n> > s/tidstore_/test_/ or similar.\n>\n> Agreed.\n\nMostly okay, although a couple look a bit generic now. I'll leave it\nup to you if you want to tweak things.\n\n> > In general, the .sql file is still very hard-coded. Functions are\n> > created that contain a VALUES statement. Maybe it's okay for now, but\n> > wanted to mention it. Ideally, we'd have some randomized tests,\n> > without having to display it. 
That could be in addition to (not\n> > replacing) the small tests we have that display input. (see below)\n> >\n>\n> Agreed to add randomized tests in addition to the existing tests.\n\nI'll try something tomorrow.\n\n> Sounds a good idea. In fact, if there are some bugs in tidstore, it's\n> likely that even initdb would fail in practice. However, it's a very\n> good idea that we can test the tidstore anyway with such a check\n> without a debug-build array.\n>\n> Or as another idea, I wonder if we could keep the debug-build array in\n> some form. For example, we use the array with the particular build\n> flag and set a BF animal for that. That way, we can test the tidstore\n> in more real cases.\n\nI think the purpose of a debug flag is to help developers catch\nmistakes. I don't think it's quite useful enough for that. For one, it\nhas the same 1GB limitation as vacuum's current array. For another,\nit'd be a terrible way to debug moving tidbitmap.c from its hash table\nto use TID store -- AND/OR operations and lossy pages are pretty much\nundoable with a copy of vacuum's array. Last year, when I insisted on\ntrying a long term realistic load that compares the result with the\narray, the encoding scheme was much harder to understand in code. I\nthink it's now easier, and there are better tests.\n\n> In the latest (v69) patch:\n>\n> - squashed v68-0005 and v68-0006 patches.\n> - removed most of the changes in v68-0007 patch.\n> - addressed above review comments in v69-0002 patch.\n> - v69-0003, 0004, and 0005 are miscellaneous updates.\n>\n> As for renaming TidStore to TIDStore, I dropped the patch for now\n> since it seems we're using \"Tid\" in some function names and variable\n> names. 
If we want to update it, we can do that later.\n\nI think we're not consistent across the codebase, and it's fine to\ndrop that patch.\n\nv70-0008:\n\n@@ -489,7 +489,7 @@ parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)\n /*\n * Free the current tidstore and return allocated DSA segments to the\n * operating system. Then we recreate the tidstore with the same max_bytes\n- * limitation.\n+ * limitation we just used.\n\nNowadays, max_bytes is now more like a hint for tidstore, and not a\nlimitation, right? Vacuum has the limitation. Maybe instead of \"with\",\nwe should say \"passing the same limitation\".\n\nI wonder how \"di_info\" would look as \"dead_items_info\". I don't feel\ntoo strongly about it, though.\n\nI'm going to try additional regression tests, as mentioned, and try a\ncouple benchmarks. It should be only a couple more days.\n\nOne thing that occurred to me: The radix tree regression tests only\ncompile and run the local memory case. The tidstore commit would be\nthe first time the buildfarm has seen the shared memory case, so we\nshould look out for possible build failures of the same sort we saw\nwith the the radix tree tests. 
I see you've already removed the\nproblematic link_with command -- that's the kind of thing to\ndouble-check for.\n\n\n", "msg_date": "Tue, 12 Mar 2024 17:34:30 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 12, 2024 at 7:34 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Mar 11, 2024 at 3:13 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Mar 11, 2024 at 12:20 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Thu, Mar 7, 2024 at 10:35 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > + ts->context = CurrentMemoryContext;\n> > >\n> > > As far as I can tell, this member is never accessed again -- am I\n> > > missing something?\n> >\n> > You're right. It was used to re-create the tidstore in the same\n> > context again while resetting it, but we no longer support the reset\n> > API. Considering it again, would it be better to allocate the iterator\n> > struct in the same context as we store the tidstore struct?\n>\n> That makes sense.\n>\n> > > + /* DSA for tidstore will be detached at the end of session */\n> > >\n> > > No other test module pins the mapping, but that doesn't necessarily\n> > > mean it's wrong. Is there some advantage over explicitly detaching?\n> >\n> > One small benefit of not explicitly detaching dsa_area in\n> > tidstore_destroy() would be simplicity; IIUC if we want to do that, we\n> > need to remember the dsa_area using (for example) a static variable,\n> > and free it if it's non-NULL. I've implemented this idea in the\n> > attached patch.\n>\n> Okay, I don't have a strong preference at this point.\n\nI'd keep the update on that.\n\n>\n> > > +-- Add tids in random order.\n> > >\n> > > I don't see any randomization here. 
I do remember adding row_number to\n> > > remove whitespace in the output, but I don't remember a random order.\n> > > On that subject, the row_number was an easy trick to avoid extra\n> > > whitespace, but maybe we should just teach the setting function to\n> > > return blocknumber rather than null?\n> >\n> > Good idea, fixed.\n>\n> + test_set_block_offsets\n> +------------------------\n> + 2147483647\n> + 0\n> + 4294967294\n> + 1\n> + 4294967295\n>\n> Hmm, was the earlier comment about randomness referring to this? I'm\n> not sure what other regression tests do in these cases, or how\n> relibale this is. If this is a problem we could simply insert this\n> result into a temp table so it's not output.\n\nI didn't address the comment about randomness.\n\nI think that we will have both random TIDs tests and fixed TIDs tests\nin test_tidstore as we discussed, and probably we can do both tests\nwith similar steps; insert TIDs into both a temp table and tidstore\nand check if the tidstore returned the results as expected by\ncomparing the results to the temp table. Probably we can have a common\npl/pgsql function that checks that and raises a WARNING or an ERROR.\nGiven that this is very similar to what we did in test_radixtree, why\ndo we really want to implement it using a pl/pgsql function? When we\ndiscussed it before, I found the current way makes sense. But given\nthat we're adding more tests and will add more tests in the future,\ndoing the tests in C will be more maintainable and faster. Also, I\nthink we can do the debug-build array stuff in the test_tidstore code\ninstead.\n\n>\n> > > +Datum\n> > > +tidstore_create(PG_FUNCTION_ARGS)\n> > > +{\n> > > ...\n> > > + tidstore = TidStoreCreate(max_bytes, dsa);\n> > >\n> > > +Datum\n> > > +tidstore_set_block_offsets(PG_FUNCTION_ARGS)\n> > > +{\n> > > ....\n> > > + TidStoreSetBlockOffsets(tidstore, blkno, offs, noffs);\n> > >\n> > > These names are too similar. 
Maybe the test module should do\n> > > s/tidstore_/test_/ or similar.\n> >\n> > Agreed.\n>\n> Mostly okay, although a couple look a bit generic now. I'll leave it\n> up to you if you want to tweak things.\n>\n> > > In general, the .sql file is still very hard-coded. Functions are\n> > > created that contain a VALUES statement. Maybe it's okay for now, but\n> > > wanted to mention it. Ideally, we'd have some randomized tests,\n> > > without having to display it. That could be in addition to (not\n> > > replacing) the small tests we have that display input. (see below)\n> > >\n> >\n> > Agreed to add randomized tests in addition to the existing tests.\n>\n> I'll try something tomorrow.\n>\n> > Sounds a good idea. In fact, if there are some bugs in tidstore, it's\n> > likely that even initdb would fail in practice. However, it's a very\n> > good idea that we can test the tidstore anyway with such a check\n> > without a debug-build array.\n> >\n> > Or as another idea, I wonder if we could keep the debug-build array in\n> > some form. For example, we use the array with the particular build\n> > flag and set a BF animal for that. That way, we can test the tidstore\n> > in more real cases.\n>\n> I think the purpose of a debug flag is to help developers catch\n> mistakes. I don't think it's quite useful enough for that. For one, it\n> has the same 1GB limitation as vacuum's current array. For another,\n> it'd be a terrible way to debug moving tidbitmap.c from its hash table\n> to use TID store -- AND/OR operations and lossy pages are pretty much\n> undoable with a copy of vacuum's array.\n\nValid points.\n\nAs I mentioned above, if we implement the test cases in C, we can use\nthe debug-build array in the test code. 
And we won't use it in AND/OR\noperations tests in the future.\n\n>\n> > In the latest (v69) patch:\n> >\n> > - squashed v68-0005 and v68-0006 patches.\n> > - removed most of the changes in v68-0007 patch.\n> > - addressed above review comments in v69-0002 patch.\n> > - v69-0003, 0004, and 0005 are miscellaneous updates.\n> >\n> > As for renaming TidStore to TIDStore, I dropped the patch for now\n> > since it seems we're using \"Tid\" in some function names and variable\n> > names. If we want to update it, we can do that later.\n>\n> I think we're not consistent across the codebase, and it's fine to\n> drop that patch.\n>\n> v70-0008:\n>\n> @@ -489,7 +489,7 @@ parallel_vacuum_reset_dead_items(ParallelVacuumState *pvs)\n> /*\n> * Free the current tidstore and return allocated DSA segments to the\n> * operating system. Then we recreate the tidstore with the same max_bytes\n> - * limitation.\n> + * limitation we just used.\n>\n> Nowadays, max_bytes is now more like a hint for tidstore, and not a\n> limitation, right? Vacuum has the limitation.\n\nRight.\n\n> Maybe instead of \"with\",\n> we should say \"passing the same limitation\".\n\nWill fix.\n\n>\n> I wonder how \"di_info\" would look as \"dead_items_info\". I don't feel\n> too strongly about it, though.\n\nAgreed.\n\n>\n> I'm going to try additional regression tests, as mentioned, and try a\n> couple benchmarks. It should be only a couple more days.\n\nThank you!\n\n> One thing that occurred to me: The radix tree regression tests only\n> compile and run the local memory case. The tidstore commit would be\n> the first time the buildfarm has seen the shared memory case, so we\n> should look out for possible build failures of the same sort we saw\n> with the the radix tree tests. I see you've already removed the\n> problematic link_with command -- that's the kind of thing to\n> double-check for.\n\nGood point, agreed. 
I'll double-check it again.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 13 Mar 2024 10:38:43 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 13, 2024 at 8:39 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> As I mentioned above, if we implement the test cases in C, we can use\n> the debug-build array in the test code. And we won't use it in AND/OR\n> operations tests in the future.\n\nThat's a really interesting idea, so I went ahead and tried that for\nv71. This seems like a good basis for testing larger, randomized\ninputs, once we decide how best to hide that from the expected output.\nThe tests use SQL functions do_set_block_offsets() and\ncheck_set_block_offsets(). The latter does two checks against a tid\narray, and replaces test_dump_tids(). Funnily enough, the debug array\nitself gave false failures when using a similar array in the test\nharness, because it didn't know all the places where the array should\nhave been sorted -- it only worked by chance before because of what\norder things were done.\n\nI squashed everything from v70 and also took the liberty of switching\non shared memory for tid store tests. The only reason we didn't do\nthis with the radix tree tests is that the static attach/detach\nfunctions would raise warnings since they are not used.", "msg_date": "Wed, 13 Mar 2024 18:04:51 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 13, 2024 at 8:05 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Mar 13, 2024 at 8:39 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > As I mentioned above, if we implement the test cases in C, we can use\n> > the debug-build array in the test code. 
And we won't use it in AND/OR\n> > operations tests in the future.\n>\n> That's a really interesting idea, so I went ahead and tried that for\n> v71. This seems like a good basis for testing larger, randomized\n> inputs, once we decide how best to hide that from the expected output.\n> The tests use SQL functions do_set_block_offsets() and\n> check_set_block_offsets(). The latter does two checks against a tid\n> array, and replaces test_dump_tids().\n\nGreat! I think that's a very good starter.\n\nThe lookup_test() (and test_lookup_tids()) do also test that the\nIsMember() function returns false as expected if the TID doesn't exist\nin it, and probably we can do these tests in a C function too.\n\nBTW do we still want to test the tidstore by using a combination of\nSQL functions? We might no longer need to input TIDs via a SQL\nfunction.\n\n> Funnily enough, the debug array\n> itself gave false failures when using a similar array in the test\n> harness, because it didn't know all the places where the array should\n> have been sorted -- it only worked by chance before because of what\n> order things were done.\n\nGood catch, thanks.\n\n> I squashed everything from v70 and also took the liberty of switching\n> on shared memory for tid store tests. 
The only reason we didn't do\n> this with the radix tree tests is that the static attach/detach\n> functions would raise warnings since they are not used.\n\nAgreed to test the tidstore on shared memory.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 13 Mar 2024 23:28:54 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 13, 2024 at 9:29 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Wed, Mar 13, 2024 at 8:05 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Wed, Mar 13, 2024 at 8:39 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > As I mentioned above, if we implement the test cases in C, we can use\n> > > the debug-build array in the test code. And we won't use it in AND/OR\n> > > operations tests in the future.\n> >\n> > That's a really interesting idea, so I went ahead and tried that for\n> > v71. This seems like a good basis for testing larger, randomized\n> > inputs, once we decide how best to hide that from the expected output.\n> > The tests use SQL functions do_set_block_offsets() and\n> > check_set_block_offsets(). The latter does two checks against a tid\n> > array, and replaces test_dump_tids().\n>\n> Great! I think that's a very good starter.\n>\n> The lookup_test() (and test_lookup_tids()) do also test that the\n> IsMember() function returns false as expected if the TID doesn't exist\n> in it, and probably we can do these tests in a C function too.\n>\n> BTW do we still want to test the tidstore by using a combination of\n> SQL functions? We might no longer need to input TIDs via a SQL\n> function.\n\nI'm not sure. I stopped short of doing that to get feedback on this\nmuch. 
One advantage with SQL functions is we can use generate_series\nto easily input lists of blocks with different numbers and strides,\nand array literals for offsets are a bit easier. What do you think?\n\n\n", "msg_date": "Thu, 14 Mar 2024 07:59:15 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 14, 2024 at 9:59 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Mar 13, 2024 at 9:29 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Mar 13, 2024 at 8:05 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Wed, Mar 13, 2024 at 8:39 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > > As I mentioned above, if we implement the test cases in C, we can use\n> > > > the debug-build array in the test code. And we won't use it in AND/OR\n> > > > operations tests in the future.\n> > >\n> > > That's a really interesting idea, so I went ahead and tried that for\n> > > v71. This seems like a good basis for testing larger, randomized\n> > > inputs, once we decide how best to hide that from the expected output.\n> > > The tests use SQL functions do_set_block_offsets() and\n> > > check_set_block_offsets(). The latter does two checks against a tid\n> > > array, and replaces test_dump_tids().\n> >\n> > Great! I think that's a very good starter.\n> >\n> > The lookup_test() (and test_lookup_tids()) do also test that the\n> > IsMember() function returns false as expected if the TID doesn't exist\n> > in it, and probably we can do these tests in a C function too.\n> >\n> > BTW do we still want to test the tidstore by using a combination of\n> > SQL functions? We might no longer need to input TIDs via a SQL\n> > function.\n>\n> I'm not sure. I stopped short of doing that to get feedback on this\n> much. 
One advantage with SQL functions is we can use generate_series\n> to easily input lists of blocks with different numbers and strides,\n> and array literals for offsets are a bit easier. What do you think?\n\nWhile I'm not a fan of the following part, I agree that it makes sense\nto use SQL functions for test data generation:\n\n-- Constant values used in the tests.\n\\set maxblkno 4294967295\n-- The maximum number of heap tuples (MaxHeapTuplesPerPage) in 8kB block is 291.\n-- We use a higher number to test tidstore.\n\\set maxoffset 512\n\nIt would also be easier for developers to test the tidstore with their\nown data set. So I agreed with the current approach; use SQL functions\nfor data generation and do the actual tests inside C functions. Is it\nconvenient for developers if we have functions like generate_tids()\nand generate_random_tids() to generate TIDs so that they can pass them\nto do_set_block_offsets()? Then they call check_set_block_offsets()\nand others for actual data lookup and iteration tests.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 14 Mar 2024 10:53:14 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 14, 2024 at 8:53 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 14, 2024 at 9:59 AM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > BTW do we still want to test the tidstore by using a combination of\n> > > SQL functions? We might no longer need to input TIDs via a SQL\n> > > function.\n> >\n> > I'm not sure. I stopped short of doing that to get feedback on this\n> > much. One advantage with SQL functions is we can use generate_series\n> > to easily input lists of blocks with different numbers and strides,\n> > and array literals for offsets are a bit easier. 
What do you think?\n>\n> While I'm not a fan of the following part, I agree that it makes sense\n> to use SQL functions for test data generation:\n>\n> -- Constant values used in the tests.\n> \\set maxblkno 4294967295\n> -- The maximum number of heap tuples (MaxHeapTuplesPerPage) in 8kB block is 291.\n> -- We use a higher number to test tidstore.\n> \\set maxoffset 512\n\nI'm not really a fan of these either, and could be removed a some\npoint if we've done everything else nicely.\n\n> It would also be easier for developers to test the tidstore with their\n> own data set. So I agreed with the current approach; use SQL functions\n> for data generation and do the actual tests inside C functions.\n\nOkay, here's an another idea: Change test_lookup_tids() to be more\ngeneral and put the validation down into C as well. First we save the\nblocks from do_set_block_offsets() into a table, then with all those\nblocks lookup a sufficiently-large range of possible offsets and save\nfound values in another array. So the static items structure would\nhave 3 arrays: inserts, successful lookups, and iteration (currently\nthe iteration output is private to check_set_block_offsets(). Then\nsort as needed and check they are all the same.\n\nFurther thought: We may not really need to test block numbers that\nvigorously, since the radix tree tests should cover keys/values pretty\nwell. 
The difference here is using bitmaps of tids and that should be\nwell covered.\n\nLocally (not CI), we should try big inputs to make sure we can\nactually go up to many GB -- it's easier and faster this way than\nhaving vacuum give us a large data set.\n\n> Is it\n> convenient for developers if we have functions like generate_tids()\n> and generate_random_tids() to generate TIDs so that they can pass them\n> to do_set_block_offsets()?\n\nI guess I don't see the advantage of adding a layer of indirection at\nthis point, but it could be useful at a later time.\n\n\n", "msg_date": "Thu, 14 Mar 2024 11:29:25 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 14, 2024 at 1:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 14, 2024 at 8:53 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Mar 14, 2024 at 9:59 AM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > > BTW do we still want to test the tidstore by using a combination of\n> > > > SQL functions? We might no longer need to input TIDs via a SQL\n> > > > function.\n> > >\n> > > I'm not sure. I stopped short of doing that to get feedback on this\n> > > much. One advantage with SQL functions is we can use generate_series\n> > > to easily input lists of blocks with different numbers and strides,\n> > > and array literals for offsets are a bit easier. 
What do you think?\n> >\n> > While I'm not a fan of the following part, I agree that it makes sense\n> > to use SQL functions for test data generation:\n> >\n> > -- Constant values used in the tests.\n> > \\set maxblkno 4294967295\n> > -- The maximum number of heap tuples (MaxHeapTuplesPerPage) in 8kB block is 291.\n> > -- We use a higher number to test tidstore.\n> > \\set maxoffset 512\n>\n> I'm not really a fan of these either, and could be removed a some\n> point if we've done everything else nicely.\n>\n> > It would also be easier for developers to test the tidstore with their\n> > own data set. So I agreed with the current approach; use SQL functions\n> > for data generation and do the actual tests inside C functions.\n>\n> Okay, here's an another idea: Change test_lookup_tids() to be more\n> general and put the validation down into C as well. First we save the\n> blocks from do_set_block_offsets() into a table, then with all those\n> blocks lookup a sufficiently-large range of possible offsets and save\n> found values in another array. So the static items structure would\n> have 3 arrays: inserts, successful lookups, and iteration (currently\n> the iteration output is private to check_set_block_offsets(). Then\n> sort as needed and check they are all the same.\n\nThat's a promising idea. We can use the same mechanism for randomized\ntests too. If you're going to work on this, I'll do other tests on my\nenvironment in the meantime.\n\n>\n> Further thought: We may not really need to test block numbers that\n> vigorously, since the radix tree tests should cover keys/values pretty\n> well.\n\nAgreed. Probably boundary block numbers: 0, 1, MaxBlockNumber - 1, and\nMaxBlockNumber, would be sufficient.\n\n> The difference here is using bitmaps of tids and that should be\n> well covered.\n\nRight. 
We would need to test offset numbers vigorously instead.\n\n>\n> Locally (not CI), we should try big inputs to make sure we can\n> actually go up to many GB -- it's easier and faster this way than\n> having vacuum give us a large data set.\n\nI'll do these tests.\n\n>\n> > Is it\n> > convenient for developers if we have functions like generate_tids()\n> > and generate_random_tids() to generate TIDs so that they can pass them\n> > to do_set_block_offsets()?\n>\n> I guess I don't see the advantage of adding a layer of indirection at\n> this point, but it could be useful at a later time.\n\nAgreed.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 14 Mar 2024 14:05:31 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 14, 2024 at 12:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 14, 2024 at 1:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > Okay, here's an another idea: Change test_lookup_tids() to be more\n> > general and put the validation down into C as well. First we save the\n> > blocks from do_set_block_offsets() into a table, then with all those\n> > blocks lookup a sufficiently-large range of possible offsets and save\n> > found values in another array. So the static items structure would\n> > have 3 arrays: inserts, successful lookups, and iteration (currently\n> > the iteration output is private to check_set_block_offsets(). Then\n> > sort as needed and check they are all the same.\n>\n> That's a promising idea. We can use the same mechanism for randomized\n> tests too. If you're going to work on this, I'll do other tests on my\n> environment in the meantime.\n\nSome progress on this in v72 -- I tried first without using SQL to\nsave the blocks, just using the unique blocks from the verification\narray. It seems to work fine. 
Some open questions on the test module:\n\n- Since there are now three arrays we should reduce max bytes to\nsomething smaller.\n- Further on that, I'm not sure if the \"is full\" test is telling us\nmuch. It seems we could make max bytes a static variable and set it to\nthe size of the empty store. I'm guessing it wouldn't take much to add\nenough tids so that the contexts need to allocate some blocks, and\nthen it would appear full and we can test that. I've made it so all\narrays repalloc when needed, just in case.\n- Why are we switching to TopMemoryContext? It's not explained -- the\ncomment only tells what the code is doing (which is obvious), but not\nwhy.\n- I'm not sure it's useful to keep test_lookup_tids() around. Since we\nnow have a separate lookup test, the only thing it can tell us is that\nlookups fail on an empty store. I arranged it so that\ncheck_set_block_offsets() works on an empty store. Although that's\neven more trivial, it's just reusing what we already need.\n\n\n", "msg_date": "Thu, 14 Mar 2024 16:55:42 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 14, 2024 at 6:55 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 14, 2024 at 12:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Mar 14, 2024 at 1:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > Okay, here's an another idea: Change test_lookup_tids() to be more\n> > > general and put the validation down into C as well. First we save the\n> > > blocks from do_set_block_offsets() into a table, then with all those\n> > > blocks lookup a sufficiently-large range of possible offsets and save\n> > > found values in another array. So the static items structure would\n> > > have 3 arrays: inserts, successful lookups, and iteration (currently\n> > > the iteration output is private to check_set_block_offsets(). 
Then\n> > > sort as needed and check they are all the same.\n> >\n> > That's a promising idea. We can use the same mechanism for randomized\n> > tests too. If you're going to work on this, I'll do other tests on my\n> > environment in the meantime.\n>\n> Some progress on this in v72 -- I tried first without using SQL to\n> save the blocks, just using the unique blocks from the verification\n> array. It seems to work fine.\n\nThanks!\n\n>\n> - Since there are now three arrays we should reduce max bytes to\n> something smaller.\n\nAgreed.\n\n> - Further on that, I'm not sure if the \"is full\" test is telling us\n> much. It seems we could make max bytes a static variable and set it to\n> the size of the empty store. I'm guessing it wouldn't take much to add\n> enough tids so that the contexts need to allocate some blocks, and\n> then it would appear full and we can test that. I've made it so all\n> arrays repalloc when needed, just in case.\n\nHow about using work_mem as max_bytes instead of having it as a static\nvariable? In test_tidstore.sql we set work_mem before creating the\ntidstore. It would make the tidstore more controllable by SQL queries.\n\n> - Why are we switching to TopMemoryContext? It's not explained -- the\n> comment only tells what the code is doing (which is obvious), but not\n> why.\n\nThis is because the tidstore needs to live across the transaction\nboundary. We can use TopMemoryContext or CacheMemoryContext.\n\n> - I'm not sure it's useful to keep test_lookup_tids() around. Since we\n> now have a separate lookup test, the only thing it can tell us is that\n> lookups fail on an empty store. I arranged it so that\n> check_set_block_offsets() works on an empty store. 
Although that's\n> even more trivial, it's just reusing what we already need.\n\nAgreed.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 14 Mar 2024 21:03:27 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 14, 2024 at 9:03 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 14, 2024 at 6:55 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Thu, Mar 14, 2024 at 12:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Thu, Mar 14, 2024 at 1:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > > Okay, here's an another idea: Change test_lookup_tids() to be more\n> > > > general and put the validation down into C as well. First we save the\n> > > > blocks from do_set_block_offsets() into a table, then with all those\n> > > > blocks lookup a sufficiently-large range of possible offsets and save\n> > > > found values in another array. So the static items structure would\n> > > > have 3 arrays: inserts, successful lookups, and iteration (currently\n> > > > the iteration output is private to check_set_block_offsets(). Then\n> > > > sort as needed and check they are all the same.\n> > >\n> > > That's a promising idea. We can use the same mechanism for randomized\n> > > tests too. If you're going to work on this, I'll do other tests on my\n> > > environment in the meantime.\n> >\n> > Some progress on this in v72 -- I tried first without using SQL to\n> > save the blocks, just using the unique blocks from the verification\n> > array. It seems to work fine.\n>\n> Thanks!\n>\n> >\n> > - Since there are now three arrays we should reduce max bytes to\n> > something smaller.\n>\n> Agreed.\n>\n> > - Further on that, I'm not sure if the \"is full\" test is telling us\n> > much. 
It seems we could make max bytes a static variable and set it to\n> > the size of the empty store. I'm guessing it wouldn't take much to add\n> > enough tids so that the contexts need to allocate some blocks, and\n> > then it would appear full and we can test that. I've made it so all\n> > arrays repalloc when needed, just in case.\n>\n> How about using work_mem as max_bytes instead of having it as a static\n> variable? In test_tidstore.sql we set work_mem before creating the\n> tidstore. It would make the tidstore more controllable by SQL queries.\n>\n> > - Why are we switching to TopMemoryContext? It's not explained -- the\n> > comment only tells what the code is doing (which is obvious), but not\n> > why.\n>\n> This is because the tidstore needs to live across the transaction\n> boundary. We can use TopMemoryContext or CacheMemoryContext.\n>\n> > - I'm not sure it's useful to keep test_lookup_tids() around. Since we\n> > now have a separate lookup test, the only thing it can tell us is that\n> > lookups fail on an empty store. I arranged it so that\n> > check_set_block_offsets() works on an empty store. 
Although that's\n> > even more trivial, it's just reusing what we already need.\n>\n> Agreed.\n>\n\nI have two questions on tidstore.c:\n\n+/*\n+ * Set the given TIDs on the blkno to TidStore.\n+ *\n+ * NB: the offset numbers in offsets must be sorted in ascending order.\n+ */\n\nDo we need some assertions to check if the given offset numbers are\nsorted expectedly?\n\n---\n+ if (TidStoreIsShared(ts))\n+ found = shared_rt_set(ts->tree.shared, blkno, page);\n+ else\n+ found = local_rt_set(ts->tree.local, blkno, page);\n+\n+ Assert(!found);\n\nGiven TidStoreSetBlockOffsets() is designed to always set (i.e.\noverwrite) the value, I think we should not expect that found is\nalways false.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 15 Mar 2024 11:48:37 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 14, 2024 at 7:04 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 14, 2024 at 6:55 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Thu, Mar 14, 2024 at 12:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Thu, Mar 14, 2024 at 1:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > > Okay, here's an another idea: Change test_lookup_tids() to be more\n> > > > general and put the validation down into C as well. First we save the\n> > > > blocks from do_set_block_offsets() into a table, then with all those\n> > > > blocks lookup a sufficiently-large range of possible offsets and save\n> > > > found values in another array. So the static items structure would\n> > > > have 3 arrays: inserts, successful lookups, and iteration (currently\n> > > > the iteration output is private to check_set_block_offsets(). Then\n> > > > sort as needed and check they are all the same.\n> > >\n> > > That's a promising idea. 
We can use the same mechanism for randomized\n> > > tests too. If you're going to work on this, I'll do other tests on my\n> > > environment in the meantime.\n> >\n> > Some progress on this in v72 -- I tried first without using SQL to\n> > save the blocks, just using the unique blocks from the verification\n> > array. It seems to work fine.\n>\n> Thanks!\n\nSeems I forgot the attachment last time...there's more stuff now\nanyway, based on discussion.\n\n> > - Since there are now three arrays we should reduce max bytes to\n> > something smaller.\n>\n> Agreed.\n\nI went further than this, see below.\n\n> > - Further on that, I'm not sure if the \"is full\" test is telling us\n> > much. It seems we could make max bytes a static variable and set it to\n> > the size of the empty store. I'm guessing it wouldn't take much to add\n> > enough tids so that the contexts need to allocate some blocks, and\n> > then it would appear full and we can test that. I've made it so all\n> > arrays repalloc when needed, just in case.\n>\n> How about using work_mem as max_bytes instead of having it as a static\n> variable? In test_tidstore.sql we set work_mem before creating the\n> tidstore. It would make the tidstore more controllable by SQL queries.\n\nMy complaint is that the \"is full\" test is trivial, and also strange\nin that max_bytes is used for two unrelated things:\n\n- the initial size of the verification arrays, which was always larger\nthan necessary, and now there are three of them\n- the hint to TidStoreCreate to calculate its max block size / the\nthreshold for being \"full\"\n\nTo make the \"is_full\" test slightly less trivial, my idea is to save\nthe empty store size and later add enough tids so that it has to\nallocate new blocks/DSA segments, which is not that many, and then it\nwill appear full. 
I've done this and also separated the purpose of\nvarious sizes in v72-0009/10.\n\nUsing actual work_mem seems a bit more difficult to make this work.\n\n> > - I'm not sure it's useful to keep test_lookup_tids() around. Since we\n> > now have a separate lookup test, the only thing it can tell us is that\n> > lookups fail on an empty store. I arranged it so that\n> > check_set_block_offsets() works on an empty store. Although that's\n> > even more trivial, it's just reusing what we already need.\n>\n> Agreed.\n\nRemoved in v72-0007\n\nOn Fri, Mar 15, 2024 at 9:49 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I have two questions on tidstore.c:\n>\n> +/*\n> + * Set the given TIDs on the blkno to TidStore.\n> + *\n> + * NB: the offset numbers in offsets must be sorted in ascending order.\n> + */\n>\n> Do we need some assertions to check if the given offset numbers are\n> sorted expectedly?\n\nDone in v72-0008\n\n> ---\n> + if (TidStoreIsShared(ts))\n> + found = shared_rt_set(ts->tree.shared, blkno, page);\n> + else\n> + found = local_rt_set(ts->tree.local, blkno, page);\n> +\n> + Assert(!found);\n>\n> Given TidStoreSetBlockOffsets() is designed to always set (i.e.\n> overwrite) the value, I think we should not expect that found is\n> always false.\n\nI find that a puzzling statement, since 1) it was designed for\ninsert-only workloads, not actual overwrite IIRC and 2) the tests will\nnow fail if the same block is set twice, since we just switched the\ntests to use a remnant of vacuum's old array. Having said that, I\ndon't object to removing artificial barriers to using it for purposes\nnot yet imagined, as long as test_tidstore.sql warns against that.\n\nGiven the above two things, I think this function's comment needs\nstronger language about its limitations. Perhaps even mention that\nit's intended for, and optimized for, vacuum. 
You and I have long\nknown that tidstore would need a separate, more complex, function to\nadd or remove individual tids from existing entries, but it might be\ngood to have that documented.\n\nOther things:\n\nv72-0011: Test that zero offset raises an error.\n\nv72-0013: I had wanted to microbenchmark this, but since we are\nrunning short of time I decided to skip that, so I want to revert some\ncode to make it again more similar to the equivalent in tidbitmap.c.\nIn the absence of evidence, it seems better to do it this way.", "msg_date": "Fri, 15 Mar 2024 14:36:10 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 15, 2024 at 4:36 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 14, 2024 at 7:04 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Mar 14, 2024 at 6:55 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Thu, Mar 14, 2024 at 12:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > >\n> > > > On Thu, Mar 14, 2024 at 1:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > > > Okay, here's an another idea: Change test_lookup_tids() to be more\n> > > > > general and put the validation down into C as well. First we save the\n> > > > > blocks from do_set_block_offsets() into a table, then with all those\n> > > > > blocks lookup a sufficiently-large range of possible offsets and save\n> > > > > found values in another array. So the static items structure would\n> > > > > have 3 arrays: inserts, successful lookups, and iteration (currently\n> > > > > the iteration output is private to check_set_block_offsets(). Then\n> > > > > sort as needed and check they are all the same.\n> > > >\n> > > > That's a promising idea. We can use the same mechanism for randomized\n> > > > tests too. 
If you're going to work on this, I'll do other tests on my\n> > > > environment in the meantime.\n> > >\n> > > Some progress on this in v72 -- I tried first without using SQL to\n> > > save the blocks, just using the unique blocks from the verification\n> > > array. It seems to work fine.\n> >\n> > Thanks!\n>\n> Seems I forgot the attachment last time...there's more stuff now\n> anyway, based on discussion.\n\nThank you for updating the patches!\n\nThe idea of using three TID arrays for the lookup test and iteration\ntest looks good to me. I think we can add random-TIDs tests on top of\nit.\n\n>\n> > > - Since there are now three arrays we should reduce max bytes to\n> > > something smaller.\n> >\n> > Agreed.\n>\n> I went further than this, see below.\n>\n> > > - Further on that, I'm not sure if the \"is full\" test is telling us\n> > > much. It seems we could make max bytes a static variable and set it to\n> > > the size of the empty store. I'm guessing it wouldn't take much to add\n> > > enough tids so that the contexts need to allocate some blocks, and\n> > > then it would appear full and we can test that. I've made it so all\n> > > arrays repalloc when needed, just in case.\n> >\n> > How about using work_mem as max_bytes instead of having it as a static\n> > variable? In test_tidstore.sql we set work_mem before creating the\n> > tidstore. 
It would make the tidstore more controllable by SQL queries.\n>\n> My complaint is that the \"is full\" test is trivial, and also strange\n> in that max_bytes is used for two unrelated things:\n>\n> - the initial size of the verification arrays, which was always larger\n> than necessary, and now there are three of them\n> - the hint to TidStoreCreate to calculate its max block size / the\n> threshold for being \"full\"\n>\n> To make the \"is_full\" test slightly less trivial, my idea is to save\n> the empty store size and later add enough tids so that it has to\n> allocate new blocks/DSA segments, which is not that many, and then it\n> will appear full. I've done this and also separated the purpose of\n> various sizes in v72-0009/10.\n\nI see your point and the changes look good to me.\n\n> Using actual work_mem seems a bit more difficult to make this work.\n\nAgreed.\n\n>\n>\n> > ---\n> > + if (TidStoreIsShared(ts))\n> > + found = shared_rt_set(ts->tree.shared, blkno, page);\n> > + else\n> > + found = local_rt_set(ts->tree.local, blkno, page);\n> > +\n> > + Assert(!found);\n> >\n> > Given TidStoreSetBlockOffsets() is designed to always set (i.e.\n> > overwrite) the value, I think we should not expect that found is\n> > always false.\n>\n> I find that a puzzling statement, since 1) it was designed for\n> insert-only workloads, not actual overwrite IIRC and 2) the tests will\n> now fail if the same block is set twice, since we just switched the\n> tests to use a remnant of vacuum's old array. Having said that, I\n> don't object to removing artificial barriers to using it for purposes\n> not yet imagined, as long as test_tidstore.sql warns against that.\n\nI think that if it supports only insert-only workload and expects the\nsame block is set only once, it should raise an error rather than an\nassertion. 
It's odd to me that the function fails only with an\nassertion build assertions even though it actually works fine even in\nthat case.\n\nAs for test_tidstore you're right that the test code doesn't handle\nthe case where setting the same block twice. I think that there is no\nproblem in the fixed-TIDs tests, but we would need something for\nrandom-TIDs tests so that we don't set the same block twice. I guess\nit could be trivial since we can use SQL queries to generate TIDs. I'm\nnot sure how the random-TIDs tests would be like, but I think we can\nuse SELECT DISTINCT to eliminate the duplicates of block numbers to\nuse.\n\n>\n> Given the above two things, I think this function's comment needs\n> stronger language about its limitations. Perhaps even mention that\n> it's intended for, and optimized for, vacuum. You and I have long\n> known that tidstore would need a separate, more complex, function to\n> add or remove individual tids from existing entries, but it might be\n> good to have that documented.\n\nAgreed.\n\n>\n> Other things:\n>\n> v72-0011: Test that zero offset raises an error.\n>\n> v72-0013: I had wanted to microbenchmark this, but since we are\n> running short of time I decided to skip that, so I want to revert some\n> code to make it again more similar to the equivalent in tidbitmap.c.\n> In the absence of evidence, it seems better to do it this way.\n\nLGTM.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 15 Mar 2024 23:16:57 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 15, 2024 at 9:17 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Fri, Mar 15, 2024 at 4:36 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Thu, Mar 14, 2024 at 7:04 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> > > Given TidStoreSetBlockOffsets() is 
designed to always set (i.e.\n> > > overwrite) the value, I think we should not expect that found is\n> > > always false.\n> >\n> > I find that a puzzling statement, since 1) it was designed for\n> > insert-only workloads, not actual overwrite IIRC and 2) the tests will\n> > now fail if the same block is set twice, since we just switched the\n> > tests to use a remnant of vacuum's old array. Having said that, I\n> > don't object to removing artificial barriers to using it for purposes\n> > not yet imagined, as long as test_tidstore.sql warns against that.\n>\n> I think that if it supports only insert-only workload and expects the\n> same block is set only once, it should raise an error rather than an\n> assertion. It's odd to me that the function fails only with an\n> assertion build assertions even though it actually works fine even in\n> that case.\n\nAfter thinking some more, I think you're right -- it's too\nheavy-handed to throw an error/assert and a public function shouldn't\nmake assumptions about the caller. It's probably just a matter of\ndocumenting the function (and it's lack of generality), and the tests\n(which are based on the thing we're replacing).\n\n> As for test_tidstore you're right that the test code doesn't handle\n> the case where setting the same block twice. I think that there is no\n> problem in the fixed-TIDs tests, but we would need something for\n> random-TIDs tests so that we don't set the same block twice. I guess\n> it could be trivial since we can use SQL queries to generate TIDs. 
I'm\n> not sure how the random-TIDs tests would be like, but I think we can\n> use SELECT DISTINCT to eliminate the duplicates of block numbers to\n> use.\n\nAlso, I don't think we need random blocks, since the radix tree tests\nexcercise that heavily already.\n\nRandom offsets is what I was thinking of (if made distinct and\nordered), but even there the code is fairy trivial, so I don't have a\nstrong feeling about it.\n\n> > Given the above two things, I think this function's comment needs\n> > stronger language about its limitations. Perhaps even mention that\n> > it's intended for, and optimized for, vacuum. You and I have long\n> > known that tidstore would need a separate, more complex, function to\n> > add or remove individual tids from existing entries, but it might be\n> > good to have that documented.\n>\n> Agreed.\n\nHow about this:\n\n /*\n- * Set the given TIDs on the blkno to TidStore.\n+ * Create or replace an entry for the given block and array of offsets\n *\n- * NB: the offset numbers in offsets must be sorted in ascending order.\n+ * NB: This function is designed and optimized for vacuum's heap scanning\n+ * phase, so has some limitations:\n+ * - The offset numbers in \"offsets\" must be sorted in ascending order.\n+ * - If the block number already exists, the entry will be replaced --\n+ * there is no way to add or remove offsets from an entry.\n */\n void\n TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets,\n\nI think we can stop including the debug-tid-store patch for CI now.\nThat would allow getting rid of some unnecessary variables. More\ncomments:\n\n+ * Prepare to iterate through a TidStore. Since the radix tree is locked during\n+ * the iteration, TidStoreEndIterate() needs to be called when finished.\n\n+ * Concurrent updates during the iteration will be blocked when inserting a\n+ * key-value to the radix tree.\n\nThis is outdated. Locking is optional. 
The remaining real reason now\nis that TidStoreEndIterate needs to free memory. We probably need to\nsay something about locking, too, but not this.\n\n+ * Scan the TidStore and return a pointer to TidStoreIterResult that has TIDs\n+ * in one block. We return the block numbers in ascending order and the offset\n+ * numbers in each result is also sorted in ascending order.\n+ */\n+TidStoreIterResult *\n+TidStoreIterateNext(TidStoreIter *iter)\n\nThe wording is a bit awkward.\n\n+/*\n+ * Finish an iteration over TidStore. This needs to be called after finishing\n+ * or when existing an iteration.\n+ */\n\ns/existing/exiting/ ?\n\nIt seems to say we need to finish after finishing. Maybe more precise wording.\n\n+/* Extract TIDs from the given key-value pair */\n+static void\n+tidstore_iter_extract_tids(TidStoreIter *iter, uint64 key,\nBlocktableEntry *page)\n\nThis is a leftover from the old encoding scheme. This should really\ntake a \"BlockNumber blockno\" not a \"key\", and the only call site\nshould probably cast the uint64 to BlockNumber.\n\n+ * tidstore.h\n+ * Tid storage.\n+ *\n+ *\n+ * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group\n\nUpdate year.\n\n+typedef struct BlocktableEntry\n+{\n+ uint16 nwords;\n+ bitmapword words[FLEXIBLE_ARRAY_MEMBER];\n+} BlocktableEntry;\n\nIn my WIP for runtime-embeddable offsets, nwords needs to be one byte.\nThat doesn't have any real-world affect on the largest offset\nencountered, and only in 32-bit builds with 32kB block size would the\ntheoretical max change at all. To be precise, we could use in the\nMaxBlocktableEntrySize calculation:\n\nMin(MaxOffsetNumber, BITS_PER_BITMAPWORD * PG_INT8_MAX - 1);\n\nTests: I never got rid of maxblkno and maxoffset, in case you wanted\nto do that. And as discussed above, maybe\n\n-- Note: The test code use an array of TIDs for verification similar\n-- to vacuum's dead item array pre-PG17. 
To avoid adding duplicates,\n-- each call to do_set_block_offsets() should use different block\n-- numbers.\n\n\n", "msg_date": "Sun, 17 Mar 2024 09:46:13 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sun, Mar 17, 2024 at 11:46 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Mar 15, 2024 at 9:17 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Fri, Mar 15, 2024 at 4:36 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Thu, Mar 14, 2024 at 7:04 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > > Given TidStoreSetBlockOffsets() is designed to always set (i.e.\n> > > > overwrite) the value, I think we should not expect that found is\n> > > > always false.\n> > >\n> > > I find that a puzzling statement, since 1) it was designed for\n> > > insert-only workloads, not actual overwrite IIRC and 2) the tests will\n> > > now fail if the same block is set twice, since we just switched the\n> > > tests to use a remnant of vacuum's old array. Having said that, I\n> > > don't object to removing artificial barriers to using it for purposes\n> > > not yet imagined, as long as test_tidstore.sql warns against that.\n> >\n> > I think that if it supports only insert-only workload and expects the\n> > same block is set only once, it should raise an error rather than an\n> > assertion. It's odd to me that the function fails only with an\n> > assertion build assertions even though it actually works fine even in\n> > that case.\n>\n> After thinking some more, I think you're right -- it's too\n> heavy-handed to throw an error/assert and a public function shouldn't\n> make assumptions about the caller. 
It's probably just a matter of\n> documenting the function (and it's lack of generality), and the tests\n> (which are based on the thing we're replacing).\n\nRemoved 'found' in 0003 patch.\n\n>\n> > As for test_tidstore you're right that the test code doesn't handle\n> > the case where setting the same block twice. I think that there is no\n> > problem in the fixed-TIDs tests, but we would need something for\n> > random-TIDs tests so that we don't set the same block twice. I guess\n> > it could be trivial since we can use SQL queries to generate TIDs. I'm\n> > not sure how the random-TIDs tests would be like, but I think we can\n> > use SELECT DISTINCT to eliminate the duplicates of block numbers to\n> > use.\n>\n> Also, I don't think we need random blocks, since the radix tree tests\n> excercise that heavily already.\n>\n> Random offsets is what I was thinking of (if made distinct and\n> ordered), but even there the code is fairy trivial, so I don't have a\n> strong feeling about it.\n\nAgreed.\n\n>\n> > > Given the above two things, I think this function's comment needs\n> > > stronger language about its limitations. Perhaps even mention that\n> > > it's intended for, and optimized for, vacuum. 
You and I have long\n> > > known that tidstore would need a separate, more complex, function to\n> > > add or remove individual tids from existing entries, but it might be\n> > > good to have that documented.\n> >\n> > Agreed.\n>\n> How about this:\n>\n> /*\n> - * Set the given TIDs on the blkno to TidStore.\n> + * Create or replace an entry for the given block and array of offsets\n> *\n> - * NB: the offset numbers in offsets must be sorted in ascending order.\n> + * NB: This function is designed and optimized for vacuum's heap scanning\n> + * phase, so has some limitations:\n> + * - The offset numbers in \"offsets\" must be sorted in ascending order.\n> + * - If the block number already exists, the entry will be replaced --\n> + * there is no way to add or remove offsets from an entry.\n> */\n> void\n> TidStoreSetBlockOffsets(TidStore *ts, BlockNumber blkno, OffsetNumber *offsets,\n\nLooks good.\n\n>\n> I think we can stop including the debug-tid-store patch for CI now.\n> That would allow getting rid of some unnecessary variables.\n\nAgreed.\n\n>\n> + * Prepare to iterate through a TidStore. Since the radix tree is locked during\n> + * the iteration, TidStoreEndIterate() needs to be called when finished.\n>\n> + * Concurrent updates during the iteration will be blocked when inserting a\n> + * key-value to the radix tree.\n>\n> This is outdated. Locking is optional. The remaining real reason now\n> is that TidStoreEndIterate needs to free memory. We probably need to\n> say something about locking, too, but not this.\n\nFixed.\n\n>\n> + * Scan the TidStore and return a pointer to TidStoreIterResult that has TIDs\n> + * in one block. We return the block numbers in ascending order and the offset\n> + * numbers in each result is also sorted in ascending order.\n> + */\n> +TidStoreIterResult *\n> +TidStoreIterateNext(TidStoreIter *iter)\n>\n> The wording is a bit awkward.\n\nFixed.\n\n>\n> +/*\n> + * Finish an iteration over TidStore. 
This needs to be called after finishing\n> + * or when existing an iteration.\n> + */\n>\n> s/existing/exiting/ ?\n>\n> It seems to say we need to finish after finishing. Maybe more precise wording.\n\nFixed.\n\n>\n> +/* Extract TIDs from the given key-value pair */\n> +static void\n> +tidstore_iter_extract_tids(TidStoreIter *iter, uint64 key,\n> BlocktableEntry *page)\n>\n> This is a leftover from the old encoding scheme. This should really\n> take a \"BlockNumber blockno\" not a \"key\", and the only call site\n> should probably cast the uint64 to BlockNumber.\n\nFixed.\n\n>\n> + * tidstore.h\n> + * Tid storage.\n> + *\n> + *\n> + * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group\n>\n> Update year.\n\nUpdated.\n\n>\n> +typedef struct BlocktableEntry\n> +{\n> + uint16 nwords;\n> + bitmapword words[FLEXIBLE_ARRAY_MEMBER];\n> +} BlocktableEntry;\n>\n> In my WIP for runtime-embeddable offsets, nwords needs to be one byte.\n> That doesn't have any real-world affect on the largest offset\n> encountered, and only in 32-bit builds with 32kB block size would the\n> theoretical max change at all. To be precise, we could use in the\n> MaxBlocktableEntrySize calculation:\n>\n> Min(MaxOffsetNumber, BITS_PER_BITMAPWORD * PG_INT8_MAX - 1);\n\nI don't get this expression. Making the nwords one byte works well?\nWith 8kB blocks, MaxOffsetNumber is 2048 and it requires 256\nbitmapword entries on 64-bit OS or 512 bitmapword entries on 32-bit\nOS, respectively. One byte nwrods variable seems not to be sufficient\nfor both cases. Also, where does the expression \"BITS_PER_BITMAPWORD *\nPG_INT8_MAX - 1\" come from?\n\n>\n> Tests: I never got rid of maxblkno and maxoffset, in case you wanted\n> to do that. And as discussed above, maybe\n>\n> -- Note: The test code use an array of TIDs for verification similar\n> -- to vacuum's dead item array pre-PG17. 
To avoid adding duplicates,\n> -- each call to do_set_block_offsets() should use different block\n> -- numbers.\n\nI've added this comment on top of the .sql file.\n\nI've attached the new patch sets. The summary of updates is:\n\n- Squashed all updates of v72\n- 0004 and 0005 are updates for test_tidstore.sql. Particularly the\n0005 patch adds randomized TID tests.\n- 0006 addresses review comments above.\n- 0007 and 0008 patches are pgindent stuff.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 18 Mar 2024 13:12:07 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 18, 2024 at 11:12 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Sun, Mar 17, 2024 at 11:46 AM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > Random offsets is what I was thinking of (if made distinct and\n> > ordered), but even there the code is fairy trivial, so I don't have a\n> > strong feeling about it.\n>\n> Agreed.\n\nLooks good.\n\nA related thing I should mention is that the tests which look up all\npossible offsets are really expensive with the number of blocks we're\nusing now (assert build):\n\nv70 0.33s\nv72 1.15s\nv73 1.32\n\nTo trim that back, I think we should give up on using shared memory\nfor the is-full test: We can cause aset to malloc a new block with a\nlot fewer entries. 
In the attached, this brings it back down to 0.43s.\nIt might also be worth reducing the number of blocks in the random\ntest -- multiple runs will have different offsets anyway.\n\n> > I think we can stop including the debug-tid-store patch for CI now.\n> > That would allow getting rid of some unnecessary variables.\n>\n> Agreed.\n\nOkay, all that remains here is to get rid of those variables (might be\njust one).\n\n> > + * Scan the TidStore and return a pointer to TidStoreIterResult that has TIDs\n> > + * in one block. We return the block numbers in ascending order and the offset\n> > + * numbers in each result is also sorted in ascending order.\n> > + */\n> > +TidStoreIterResult *\n> > +TidStoreIterateNext(TidStoreIter *iter)\n> >\n> > The wording is a bit awkward.\n>\n> Fixed.\n\n- * Scan the TidStore and return a pointer to TidStoreIterResult that has TIDs\n- * in one block. We return the block numbers in ascending order and the offset\n- * numbers in each result is also sorted in ascending order.\n+ * Scan the TidStore and return the TIDs of the next block. The returned block\n+ * numbers is sorted in ascending order, and the offset numbers in each result\n+ * is also sorted in ascending order.\n\nBetter, but it's still not very clear. Maybe \"The offsets in each\niteration result are ordered, as are the block numbers over all\niterations.\"\n\n> > +/* Extract TIDs from the given key-value pair */\n> > +static void\n> > +tidstore_iter_extract_tids(TidStoreIter *iter, uint64 key,\n> > BlocktableEntry *page)\n> >\n> > This is a leftover from the old encoding scheme. This should really\n> > take a \"BlockNumber blockno\" not a \"key\", and the only call site\n> > should probably cast the uint64 to BlockNumber.\n>\n> Fixed.\n\nThis part looks good. 
I didn't notice earlier, but this comment has a\nsimilar issue\n\n@@ -384,14 +391,15 @@ TidStoreIterateNext(TidStoreIter *iter)\n return NULL;\n\n /* Collect TIDs extracted from the key-value pair */\n- tidstore_iter_extract_tids(iter, key, page);\n+ tidstore_iter_extract_tids(iter, (BlockNumber) key, page);\n\n...\"extracted\" was once a separate operation. I think just removing\nthat one word is enough to update it.\n\nSome other review on code comments:\n\nv73-0001:\n\n+ /* Enlarge the TID array if necessary */\n\nIt's \"arrays\" now.\n\nv73-0005:\n\n+-- Random TIDs test. We insert TIDs for 1000 blocks. Each block has\n+-- different randon 100 offset numbers each other.\n\nThe numbers are obvious from the query. Maybe just mention that the\noffsets are randomized and must be unique and ordered.\n\n+ * The caller is responsible for release any locks.\n\n\"releasing\"\n\n> > +typedef struct BlocktableEntry\n> > +{\n> > + uint16 nwords;\n> > + bitmapword words[FLEXIBLE_ARRAY_MEMBER];\n> > +} BlocktableEntry;\n> >\n> > In my WIP for runtime-embeddable offsets, nwords needs to be one byte.\n\nI should be more clear here: nwords fitting into one byte allows 3\nembedded offsets (1 on 32-bit platforms, which is good for testing at\nleast). With uint16 nwords that reduces to 2 (none on 32-bit\nplatforms). Further, after the current patch series is fully\ncommitted, I plan to split the embedded-offset patch into two parts:\nThe first would store the offsets in the header, but would still need\na (smaller) allocation. The second would embed them in the child\npointer. Only the second patch will care about the size of nwords\nbecause it needs to reserve a byte for the pointer tag.\n\n> > That doesn't have any real-world affect on the largest offset\n> > encountered, and only in 32-bit builds with 32kB block size would the\n> > theoretical max change at all. 
To be precise, we could use in the\n> > MaxBlocktableEntrySize calculation:\n> >\n> > Min(MaxOffsetNumber, BITS_PER_BITMAPWORD * PG_INT8_MAX - 1);\n>\n> I don't get this expression. Making the nwords one byte works well?\n> With 8kB blocks, MaxOffsetNumber is 2048 and it requires 256\n> bitmapword entries on 64-bit OS or 512 bitmapword entries on 32-bit\n> OS, respectively. One byte nwrods variable seems not to be sufficient\n\nI believe there is confusion between bitmap words and bytes:\n2048 / 64 = 32 words = 256 bytes\n\nIt used to be max tuples per (heap) page, but we wanted a simple way\nto make this independent of heap. I believe we won't need to ever\nstore the actual MaxOffsetNumber, although we technically still could\nwith a one-byte type and 32kB pages, at least on 64-bit platforms.\n\n> for both cases. Also, where does the expression \"BITS_PER_BITMAPWORD *\n> PG_INT8_MAX - 1\" come from?\n\n127 words, each with 64 (or 32) bits. The zero bit is not a valid\noffset, so subtract one. 
And I used signed type in case there was a\nneed for -1 to mean something.", "msg_date": "Tue, 19 Mar 2024 06:35:39 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 19, 2024 at 8:35 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Mar 18, 2024 at 11:12 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Sun, Mar 17, 2024 at 11:46 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > Random offsets is what I was thinking of (if made distinct and\n> > > ordered), but even there the code is fairy trivial, so I don't have a\n> > > strong feeling about it.\n> >\n> > Agreed.\n>\n> Looks good.\n>\n> A related thing I should mention is that the tests which look up all\n> possible offsets are really expensive with the number of blocks we're\n> using now (assert build):\n>\n> v70 0.33s\n> v72 1.15s\n> v73 1.32\n>\n> To trim that back, I think we should give up on using shared memory\n> for the is-full test: We can cause aset to malloc a new block with a\n> lot fewer entries. In the attached, this brings it back down to 0.43s.\n\nLooks good. Agreed with this change.\n\n> It might also be worth reducing the number of blocks in the random\n> test -- multiple runs will have different offsets anyway.\n\nYes. If we reduce the number of blocks from 1000 to 100, the\nregression test took on my environment:\n\n1000 blocks : 516 ms\n100 blocks : 228 ms\n\n>\n> > > I think we can stop including the debug-tid-store patch for CI now.\n> > > That would allow getting rid of some unnecessary variables.\n> >\n> > Agreed.\n>\n> Okay, all that remains here is to get rid of those variables (might be\n> just one).\n\nRemoved some unnecessary variables in 0002 patch.\n\n>\n> > > + * Scan the TidStore and return a pointer to TidStoreIterResult that has TIDs\n> > > + * in one block. 
We return the block numbers in ascending order and the offset\n> > > + * numbers in each result is also sorted in ascending order.\n> > > + */\n> > > +TidStoreIterResult *\n> > > +TidStoreIterateNext(TidStoreIter *iter)\n> > >\n> > > The wording is a bit awkward.\n> >\n> > Fixed.\n>\n> - * Scan the TidStore and return a pointer to TidStoreIterResult that has TIDs\n> - * in one block. We return the block numbers in ascending order and the offset\n> - * numbers in each result is also sorted in ascending order.\n> + * Scan the TidStore and return the TIDs of the next block. The returned block\n> + * numbers is sorted in ascending order, and the offset numbers in each result\n> + * is also sorted in ascending order.\n>\n> Better, but it's still not very clear. Maybe \"The offsets in each\n> iteration result are ordered, as are the block numbers over all\n> iterations.\"\n\nThanks, fixed.\n\n>\n> > > +/* Extract TIDs from the given key-value pair */\n> > > +static void\n> > > +tidstore_iter_extract_tids(TidStoreIter *iter, uint64 key,\n> > > BlocktableEntry *page)\n> > >\n> > > This is a leftover from the old encoding scheme. This should really\n> > > take a \"BlockNumber blockno\" not a \"key\", and the only call site\n> > > should probably cast the uint64 to BlockNumber.\n> >\n> > Fixed.\n>\n> This part looks good. I didn't notice earlier, but this comment has a\n> similar issue\n>\n> @@ -384,14 +391,15 @@ TidStoreIterateNext(TidStoreIter *iter)\n> return NULL;\n>\n> /* Collect TIDs extracted from the key-value pair */\n> - tidstore_iter_extract_tids(iter, key, page);\n> + tidstore_iter_extract_tids(iter, (BlockNumber) key, page);\n>\n> ...\"extracted\" was once a separate operation. I think just removing\n> that one word is enough to update it.\n\nFixed.\n\n>\n> Some other review on code comments:\n>\n> v73-0001:\n>\n> + /* Enlarge the TID array if necessary */\n>\n> It's \"arrays\" now.\n>\n> v73-0005:\n>\n> +-- Random TIDs test. We insert TIDs for 1000 blocks. 
Each block has\n> +-- different randon 100 offset numbers each other.\n>\n> The numbers are obvious from the query. Maybe just mention that the\n> offsets are randomized and must be unique and ordered.\n>\n> + * The caller is responsible for release any locks.\n>\n> \"releasing\"\n\nFixed.\n\n>\n> > > +typedef struct BlocktableEntry\n> > > +{\n> > > + uint16 nwords;\n> > > + bitmapword words[FLEXIBLE_ARRAY_MEMBER];\n> > > +} BlocktableEntry;\n> > >\n> > > In my WIP for runtime-embeddable offsets, nwords needs to be one byte.\n>\n> I should be more clear here: nwords fitting into one byte allows 3\n> embedded offsets (1 on 32-bit platforms, which is good for testing at\n> least). With uint16 nwords that reduces to 2 (none on 32-bit\n> platforms). Further, after the current patch series is fully\n> committed, I plan to split the embedded-offset patch into two parts:\n> The first would store the offsets in the header, but would still need\n> a (smaller) allocation. The second would embed them in the child\n> pointer. Only the second patch will care about the size of nwords\n> because it needs to reserve a byte for the pointer tag.\n\nThank you for the clarification.\n\n>\n> > > That doesn't have any real-world affect on the largest offset\n> > > encountered, and only in 32-bit builds with 32kB block size would the\n> > > theoretical max change at all. To be precise, we could use in the\n> > > MaxBlocktableEntrySize calculation:\n> > >\n> > > Min(MaxOffsetNumber, BITS_PER_BITMAPWORD * PG_INT8_MAX - 1);\n> >\n> > I don't get this expression. Making the nwords one byte works well?\n> > With 8kB blocks, MaxOffsetNumber is 2048 and it requires 256\n> > bitmapword entries on 64-bit OS or 512 bitmapword entries on 32-bit\n> > OS, respectively. 
One byte nwrods variable seems not to be sufficient\n>\n> I believe there is confusion between bitmap words and bytes:\n> 2048 / 64 = 32 words = 256 bytes\n\nOops, you're right.\n\n>\n> It used to be max tuples per (heap) page, but we wanted a simple way\n> to make this independent of heap. I believe we won't need to ever\n> store the actual MaxOffsetNumber, although we technically still could\n> with a one-byte type and 32kB pages, at least on 64-bit platforms.\n>\n> > for both cases. Also, where does the expression \"BITS_PER_BITMAPWORD *\n> > PG_INT8_MAX - 1\" come from?\n>\n> 127 words, each with 64 (or 32) bits. The zero bit is not a valid\n> offset, so subtract one. And I used signed type in case there was a\n> need for -1 to mean something.\n\nOkay, I missed that we want to change nwords from uint8 to int8.\n\nSo the MaxBlocktableEntrySize calculation would be as follows?\n\n#define MaxBlocktableEntrySize \\\n offsetof(BlocktableEntry, words) + \\\n (sizeof(bitmapword) * \\\n WORDS_PER_PAGE(Min(MaxOffsetNumber, \\\n BITS_PER_BITMAPWORD * PG_INT8_MAX - 1))))\n\nI've made this change in the 0003 patch.\n\nWhile reviewing the vacuum patch, I realized that we always pass\nLWTRANCHE_SHARED_TIDSTORE to RT_CREATE(), and the wait event related\nto the tidstore is therefore always the same. I think it would be\nbetter to make the caller of TidStoreCreate() specify the tranch_id\nand pass it to RT_CREATE(). That way, the caller can specify their own\nwait event for tidstore. The 0008 patch tried this idea. 
dshash.c does\nthe same idea.\n\nOther patches are minor updates for tidstore and vacuum patches.\n\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Tue, 19 Mar 2024 12:23:47 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 19, 2024 at 10:24 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Tue, Mar 19, 2024 at 8:35 AM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Mon, Mar 18, 2024 at 11:12 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Sun, Mar 17, 2024 at 11:46 AM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > It might also be worth reducing the number of blocks in the random\n> > test -- multiple runs will have different offsets anyway.\n>\n> Yes. If we reduce the number of blocks from 1000 to 100, the\n> regression test took on my environment:\n>\n> 1000 blocks : 516 ms\n> 100 blocks : 228 ms\n\nSounds good.\n\n> Removed some unnecessary variables in 0002 patch.\n\nLooks good.\n\n> So the MaxBlocktableEntrySize calculation would be as follows?\n>\n> #define MaxBlocktableEntrySize \\\n> offsetof(BlocktableEntry, words) + \\\n> (sizeof(bitmapword) * \\\n> WORDS_PER_PAGE(Min(MaxOffsetNumber, \\\n> BITS_PER_BITMAPWORD * PG_INT8_MAX - 1))))\n>\n> I've made this change in the 0003 patch.\n\nThis is okay, but one side effect is that we have both an assert and\nan elog, for different limits. I think we'll need a separate #define\nto help. But for now, I don't want to hold up tidstore further with\nthis because I believe almost everything else in v74 is in pretty good\nshape. 
I'll save this for later as a part of the optimization I\nproposed.\n\nRemaining things I noticed:\n\n+#define RT_PREFIX local_rt\n+#define RT_PREFIX shared_rt\n\nPrefixes for simplehash, for example, don't have \"sh\" -- maybe \"local/shared_ts\"\n\n+ /* MemoryContext where the radix tree uses */\n\ns/where/that/\n\n+/*\n+ * Lock support functions.\n+ *\n+ * We can use the radix tree's lock for shared TidStore as the data we\n+ * need to protect is only the shared radix tree.\n+ */\n+void\n+TidStoreLockExclusive(TidStore *ts)\n\nTalking about multiple things, so maybe a blank line after the comment.\n\nWith those, I think you can go ahead and squash all the tidstore\npatches except for 0003 and commit it.\n\n> While reviewing the vacuum patch, I realized that we always pass\n> LWTRANCHE_SHARED_TIDSTORE to RT_CREATE(), and the wait event related\n> to the tidstore is therefore always the same. I think it would be\n> better to make the caller of TidStoreCreate() specify the tranch_id\n> and pass it to RT_CREATE(). That way, the caller can specify their own\n> wait event for tidstore. The 0008 patch tried this idea. dshash.c does\n> the same idea.\n\nSounds reasonable. 
I'll just note that src/include/storage/lwlock.h\nstill has an entry for LWTRANCHE_SHARED_TIDSTORE.\n\n\n", "msg_date": "Tue, 19 Mar 2024 16:40:06 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Tue, Mar 19, 2024 at 6:40 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Tue, Mar 19, 2024 at 10:24 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Tue, Mar 19, 2024 at 8:35 AM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Mon, Mar 18, 2024 at 11:12 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > > >\n> > > > On Sun, Mar 17, 2024 at 11:46 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > It might also be worth reducing the number of blocks in the random\n> > > test -- multiple runs will have different offsets anyway.\n> >\n> > Yes. If we reduce the number of blocks from 1000 to 100, the\n> > regression test took on my environment:\n> >\n> > 1000 blocks : 516 ms\n> > 100 blocks : 228 ms\n>\n> Sounds good.\n>\n> > Removed some unnecessary variables in 0002 patch.\n>\n> Looks good.\n>\n> > So the MaxBlocktableEntrySize calculation would be as follows?\n> >\n> > #define MaxBlocktableEntrySize \\\n> > offsetof(BlocktableEntry, words) + \\\n> > (sizeof(bitmapword) * \\\n> > WORDS_PER_PAGE(Min(MaxOffsetNumber, \\\n> > BITS_PER_BITMAPWORD * PG_INT8_MAX - 1))))\n> >\n> > I've made this change in the 0003 patch.\n>\n> This is okay, but one side effect is that we have both an assert and\n> an elog, for different limits. I think we'll need a separate #define\n> to help. But for now, I don't want to hold up tidstore further with\n> this because I believe almost everything else in v74 is in pretty good\n> shape. 
I'll save this for later as a part of the optimization I\n> proposed.\n>\n> Remaining things I noticed:\n>\n> +#define RT_PREFIX local_rt\n> +#define RT_PREFIX shared_rt\n>\n> Prefixes for simplehash, for example, don't have \"sh\" -- maybe \"local/shared_ts\"\n>\n> + /* MemoryContext where the radix tree uses */\n>\n> s/where/that/\n>\n> +/*\n> + * Lock support functions.\n> + *\n> + * We can use the radix tree's lock for shared TidStore as the data we\n> + * need to protect is only the shared radix tree.\n> + */\n> +void\n> +TidStoreLockExclusive(TidStore *ts)\n>\n> Talking about multiple things, so maybe a blank line after the comment.\n>\n> With those, I think you can go ahead and squash all the tidstore\n> patches except for 0003 and commit it.\n>\n> > While reviewing the vacuum patch, I realized that we always pass\n> > LWTRANCHE_SHARED_TIDSTORE to RT_CREATE(), and the wait event related\n> > to the tidstore is therefore always the same. I think it would be\n> > better to make the caller of TidStoreCreate() specify the tranch_id\n> > and pass it to RT_CREATE(). That way, the caller can specify their own\n> > wait event for tidstore. The 0008 patch tried this idea. dshash.c does\n> > the same idea.\n>\n> Sounds reasonable. I'll just note that src/include/storage/lwlock.h\n> still has an entry for LWTRANCHE_SHARED_TIDSTORE.\n\nThank you. I've incorporated all the comments above. 
I've attached the\nlatest patches, and am going to push them (one by one) after\nself-review again.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Wed, 20 Mar 2024 01:40:25 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 14, 2024 at 12:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 14, 2024 at 1:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > Locally (not CI), we should try big inputs to make sure we can\n> > actually go up to many GB -- it's easier and faster this way than\n> > having vacuum give us a large data set.\n>\n> I'll do these tests.\n\nI just remembered this -- did any of this kind of testing happen? I\ncan do it as well.\n\n> Thank you. I've incorporated all the comments above. I've attached the\n> latest patches, and am going to push them (one by one) after\n> self-review again.\n\nOne more cosmetic thing in 0001 that caught my eye:\n\ndiff --git a/src/backend/access/common/Makefile\nb/src/backend/access/common/Makefile\nindex b9aff0ccfd..67b8cc6108 100644\n--- a/src/backend/access/common/Makefile\n+++ b/src/backend/access/common/Makefile\n@@ -27,6 +27,7 @@ OBJS = \\\n syncscan.o \\\n toast_compression.o \\\n toast_internals.o \\\n+ tidstore.o \\\n tupconvert.o \\\n tupdesc.o\n\ndiff --git a/src/backend/access/common/meson.build\nb/src/backend/access/common/meson.build\nindex 725041a4ce..a02397855e 100644\n--- a/src/backend/access/common/meson.build\n+++ b/src/backend/access/common/meson.build\n@@ -15,6 +15,7 @@ backend_sources += files(\n 'syncscan.c',\n 'toast_compression.c',\n 'toast_internals.c',\n+ 'tidstore.c',\n 'tupconvert.c',\n 'tupdesc.c',\n )\n\nThese aren't in alphabetical order.\n\n\n", "msg_date": "Wed, 20 Mar 2024 13:48:10 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": 
"Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 20, 2024 at 3:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 14, 2024 at 12:06 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Mar 14, 2024 at 1:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > Locally (not CI), we should try big inputs to make sure we can\n> > > actually go up to many GB -- it's easier and faster this way than\n> > > having vacuum give us a large data set.\n> >\n> > I'll do these tests.\n>\n> I just remembered this -- did any of this kind of testing happen? I\n> can do it as well.\n\nI forgot to report the results. Yes, I did some tests where I inserted\nmany TIDs to make the tidstore use several GB memory. I did two cases:\n\n1. insert 100M blocks of TIDs with an offset of 100.\n2. insert 10M blocks of TIDs with an offset of 2048.\n\nThe tidstore used about 4.8GB and 5.2GB, respectively, and all lookup\nand iteration results were expected.\n\n>\n> > Thank you. I've incorporated all the comments above. 
I've attached the\n> > latest patches, and am going to push them (one by one) after\n> > self-review again.\n>\n> One more cosmetic thing in 0001 that caught my eye:\n>\n> diff --git a/src/backend/access/common/Makefile\n> b/src/backend/access/common/Makefile\n> index b9aff0ccfd..67b8cc6108 100644\n> --- a/src/backend/access/common/Makefile\n> +++ b/src/backend/access/common/Makefile\n> @@ -27,6 +27,7 @@ OBJS = \\\n> syncscan.o \\\n> toast_compression.o \\\n> toast_internals.o \\\n> + tidstore.o \\\n> tupconvert.o \\\n> tupdesc.o\n>\n> diff --git a/src/backend/access/common/meson.build\n> b/src/backend/access/common/meson.build\n> index 725041a4ce..a02397855e 100644\n> --- a/src/backend/access/common/meson.build\n> +++ b/src/backend/access/common/meson.build\n> @@ -15,6 +15,7 @@ backend_sources += files(\n> 'syncscan.c',\n> 'toast_compression.c',\n> 'toast_internals.c',\n> + 'tidstore.c',\n> 'tupconvert.c',\n> 'tupdesc.c',\n> )\n>\n> These aren't in alphabetical order.\n\nGood catch. I'll fix them before the push.\n\nWhile reviewing the codes again, the following two things caught my eyes:\n\nin check_set_block_offset() function, we don't take a lock on the\ntidstore while checking all possible TIDs. I'll add\nTidStoreLockShare() and TidStoreUnlock() as follows:\n\n+ TidStoreLockShare(tidstore);\n if (TidStoreIsMember(tidstore, &tid))\n ItemPointerSet(&items.lookup_tids[num_lookup_tids++],\nblkno, offset);\n+ TidStoreUnlock(tidstore);\n\n---\nRegarding TidStoreMemoryUsage(), IIUC the caller doesn't need to take\na lock on the shared tidstore since dsa_get_total_size() (called by\nRT_MEMORY_USAGE()) does appropriate locking. 
I think we can mention it\nin the comment as follows:\n\n-/* Return the memory usage of TidStore */\n+/*\n+ * Return the memory usage of TidStore.\n+ *\n+ * In shared TidStore cases, since shared_ts_memory_usage() does appropriate\n+ * locking, the caller doesn't need to take a lock.\n+ */\n\nWhat do you think?\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Wed, 20 Mar 2024 22:30:06 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 20, 2024 at 8:30 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> I forgot to report the results. Yes, I did some tests where I inserted\n> many TIDs to make the tidstore use several GB memory. I did two cases:\n>\n> 1. insert 100M blocks of TIDs with an offset of 100.\n> 2. insert 10M blocks of TIDs with an offset of 2048.\n>\n> The tidstore used about 4.8GB and 5.2GB, respectively, and all lookup\n> and iteration results were expected.\n\nThanks for confirming!\n\n> While reviewing the codes again, the following two things caught my eyes:\n>\n> in check_set_block_offset() function, we don't take a lock on the\n> tidstore while checking all possible TIDs. I'll add\n> TidStoreLockShare() and TidStoreUnlock() as follows:\n>\n> + TidStoreLockShare(tidstore);\n> if (TidStoreIsMember(tidstore, &tid))\n> ItemPointerSet(&items.lookup_tids[num_lookup_tids++],\n> blkno, offset);\n> + TidStoreUnlock(tidstore);\n\nIn one sense, all locking in the test module is useless since there is\nonly a single process. On the other hand, it seems good to at least\nrun what we have written to run it trivially, and serve as an example\nof usage. 
We should probably be consistent, and document at the top\nthat the locks are pro-forma only.\n\nIt's both a blessing and a curse that vacuum only has a single writer.\nIt makes development less of a hassle, but also means that tidstore\nlocking is done for API-completeness reasons, not (yet) as a practical\nnecessity. Even tidbitmap.c's hash table currently has a single\nwriter, and while using tidstore for that is still an engineering\nchallenge for other reasons, it wouldn't exercise locking\nmeaningfully, either, at least at first.\n\n> Regarding TidStoreMemoryUsage(), IIUC the caller doesn't need to take\n> a lock on the shared tidstore since dsa_get_total_size() (called by\n> RT_MEMORY_USAGE()) does appropriate locking. I think we can mention it\n> in the comment as follows:\n>\n> -/* Return the memory usage of TidStore */\n> +/*\n> + * Return the memory usage of TidStore.\n> + *\n> + * In shared TidStore cases, since shared_ts_memory_usage() does appropriate\n> + * locking, the caller doesn't need to take a lock.\n> + */\n>\n> What do you think?\n\nThat duplicates the underlying comment on the radix tree function that\nthis calls, so I'm inclined to leave it out. At this level it's\nprobably best to document when a caller _does_ need to take an action.\n\nOne thing I forgot to ask about earlier:\n\n+-- Add tids in out of order.\n\nAre they (the blocks to be precise) really out of order? The VALUES\nstatement is ordered, but after inserting it does not output that way.\nI wondered if this is platform independent, but CI and our dev\nmachines haven't failed this test, and I haven't looked into what\ndetermines the order. 
It's easy enough to hide the blocks if we ever\nneed to, as we do elsewhere...\n\n\n", "msg_date": "Wed, 20 Mar 2024 21:19:46 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 20, 2024 at 11:19 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Wed, Mar 20, 2024 at 8:30 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > I forgot to report the results. Yes, I did some tests where I inserted\n> > many TIDs to make the tidstore use several GB memory. I did two cases:\n> >\n> > 1. insert 100M blocks of TIDs with an offset of 100.\n> > 2. insert 10M blocks of TIDs with an offset of 2048.\n> >\n> > The tidstore used about 4.8GB and 5.2GB, respectively, and all lookup\n> > and iteration results were expected.\n>\n> Thanks for confirming!\n>\n> > While reviewing the codes again, the following two things caught my eyes:\n> >\n> > in check_set_block_offset() function, we don't take a lock on the\n> > tidstore while checking all possible TIDs. I'll add\n> > TidStoreLockShare() and TidStoreUnlock() as follows:\n> >\n> > + TidStoreLockShare(tidstore);\n> > if (TidStoreIsMember(tidstore, &tid))\n> > ItemPointerSet(&items.lookup_tids[num_lookup_tids++],\n> > blkno, offset);\n> > + TidStoreUnlock(tidstore);\n>\n> In one sense, all locking in the test module is useless since there is\n> only a single process. On the other hand, it seems good to at least\n> run what we have written to run it trivially, and serve as an example\n> of usage. We should probably be consistent, and document at the top\n> that the locks are pro-forma only.\n\nAgreed.\n\n>\n> > Regarding TidStoreMemoryUsage(), IIUC the caller doesn't need to take\n> > a lock on the shared tidstore since dsa_get_total_size() (called by\n> > RT_MEMORY_USAGE()) does appropriate locking. 
I think we can mention it\n> > in the comment as follows:\n> >\n> > -/* Return the memory usage of TidStore */\n> > +/*\n> > + * Return the memory usage of TidStore.\n> > + *\n> > + * In shared TidStore cases, since shared_ts_memory_usage() does appropriate\n> > + * locking, the caller doesn't need to take a lock.\n> > + */\n> >\n> > What do you think?\n>\n> That duplicates the underlying comment on the radix tree function that\n> this calls, so I'm inclined to leave it out. At this level it's\n> probably best to document when a caller _does_ need to take an action.\n\nOkay, I didn't change it.\n\n>\n> One thing I forgot to ask about earlier:\n>\n> +-- Add tids in out of order.\n>\n> Are they (the blocks to be precise) really out of order? The VALUES\n> statement is ordered, but after inserting it does not output that way.\n> I wondered if this is platform independent, but CI and our dev\n> machines haven't failed this test, and I haven't looked into what\n> determines the order. It's easy enough to hide the blocks if we ever\n> need to, as we do elsewhere...\n\nIt seems not necessary as such a test is already covered by\ntest_radixtree. I've changed the query to hide the output blocks.\n\nI've pushed the tidstore patch after incorporating the above changes.\nIn addition to that, I've added the following changes before the push:\n\n- Added src/test/modules/test_tidstore/.gitignore file.\n- Removed unnecessary #include from tidstore.c.\n\nThe buildfarm has been all-green so far.\n\nI've attached the latest vacuum improvement patch.\n\nI just remembered that the tidstore cannot still be used for parallel\nvacuum with minimum maintenance_work_mem. Even when the shared\ntidstore is empty, its memory usage reports 1056768 bytes, a bit above\n1MB (1048576 bytes). 
We need something discussed on another thread[1]\nin order to make it work.\n\nRegards,\n\n[1] https://www.postgresql.org/message-id/CAD21AoCVMw6DSmgZY9h%2BxfzKtzJeqWiwxaUD2T-FztVcV-XibQ%40mail.gmail.com\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 21 Mar 2024 11:37:16 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 21, 2024 at 9:37 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Wed, Mar 20, 2024 at 11:19 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > Are they (the blocks to be precise) really out of order? The VALUES\n> > statement is ordered, but after inserting it does not output that way.\n> > I wondered if this is platform independent, but CI and our dev\n> > machines haven't failed this test, and I haven't looked into what\n> > determines the order. It's easy enough to hide the blocks if we ever\n> > need to, as we do elsewhere...\n>\n> It seems not necessary as such a test is already covered by\n> test_radixtree. I've changed the query to hide the output blocks.\n\nOkay.\n\n> The buildfarm has been all-green so far.\n\nGreat!\n\n> I've attached the latest vacuum improvement patch.\n>\n> I just remembered that the tidstore cannot still be used for parallel\n> vacuum with minimum maintenance_work_mem. Even when the shared\n> tidstore is empty, its memory usage reports 1056768 bytes, a bit above\n> 1MB (1048576 bytes). We need something discussed on another thread[1]\n> in order to make it work.\n\nFor exactly this reason, we used to have a clamp on max_bytes when it\nwas internal to tidstore, so that it never reported full when first\ncreated, so I guess that got thrown away when we got rid of the\ncontrol object in shared memory. Forcing callers to clamp their own\nlimits seems pretty unfriendly, though.\n\nThe proposals in that thread are pretty simple. 
If those don't move\nforward soon, a hackish workaround would be to round down the number\nwe get from dsa_get_total_size to the nearest megabyte. Then\ncontrolling min/max segment size would be a nice-to-have for PG17, not\na prerequisite.\n\n\n", "msg_date": "Thu, 21 Mar 2024 10:40:05 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 21, 2024 at 12:40 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 21, 2024 at 9:37 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Wed, Mar 20, 2024 at 11:19 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > Are they (the blocks to be precise) really out of order? The VALUES\n> > > statement is ordered, but after inserting it does not output that way.\n> > > I wondered if this is platform independent, but CI and our dev\n> > > machines haven't failed this test, and I haven't looked into what\n> > > determines the order. It's easy enough to hide the blocks if we ever\n> > > need to, as we do elsewhere...\n> >\n> > It seems not necessary as such a test is already covered by\n> > test_radixtree. I've changed the query to hide the output blocks.\n>\n> Okay.\n>\n> > The buildfarm has been all-green so far.\n>\n> Great!\n>\n> > I've attached the latest vacuum improvement patch.\n> >\n> > I just remembered that the tidstore cannot still be used for parallel\n> > vacuum with minimum maintenance_work_mem. Even when the shared\n> > tidstore is empty, its memory usage reports 1056768 bytes, a bit above\n> > 1MB (1048576 bytes). We need something discussed on another thread[1]\n> > in order to make it work.\n>\n> For exactly this reason, we used to have a clamp on max_bytes when it\n> was internal to tidstore, so that it never reported full when first\n> created, so I guess that got thrown away when we got rid of the\n> control object in shared memory. 
Forcing callers to clamp their own\n> limits seems pretty unfriendly, though.\n\nOr we can have a new function for dsa.c to set the initial and max\nsegment size (or either one) to the existing DSA area so that\nTidStoreCreate() can specify them at creation. In shared TidStore\ncases, since all memory required by shared radix tree is allocated in\nthe passed-in DSA area and the memory usage is the total segment size\nallocated in the DSA area, the user will have to prepare a DSA area\nonly for the shared tidstore. So we might be able to expect that the\nDSA passed-in to TidStoreCreate() is empty and its segment sizes can\nbe adjustable.\n\n>\n> The proposals in that thread are pretty simple. If those don't move\n> forward soon, a hackish workaround would be to round down the number\n> we get from dsa_get_total_size to the nearest megabyte. Then\n> controlling min/max segment size would be a nice-to-have for PG17, not\n> a prerequisite.\n\nInteresting idea.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 21 Mar 2024 15:10:30 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 21, 2024 at 3:10 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 21, 2024 at 12:40 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Thu, Mar 21, 2024 at 9:37 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Wed, Mar 20, 2024 at 11:19 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > > Are they (the blocks to be precise) really out of order? The VALUES\n> > > > statement is ordered, but after inserting it does not output that way.\n> > > > I wondered if this is platform independent, but CI and our dev\n> > > > machines haven't failed this test, and I haven't looked into what\n> > > > determines the order. 
It's easy enough to hide the blocks if we ever\n> > > > need to, as we do elsewhere...\n> > >\n> > > It seems not necessary as such a test is already covered by\n> > > test_radixtree. I've changed the query to hide the output blocks.\n> >\n> > Okay.\n> >\n> > > The buildfarm has been all-green so far.\n> >\n> > Great!\n> >\n> > > I've attached the latest vacuum improvement patch.\n> > >\n> > > I just remembered that the tidstore cannot still be used for parallel\n> > > vacuum with minimum maintenance_work_mem. Even when the shared\n> > > tidstore is empty, its memory usage reports 1056768 bytes, a bit above\n> > > 1MB (1048576 bytes). We need something discussed on another thread[1]\n> > > in order to make it work.\n> >\n> > For exactly this reason, we used to have a clamp on max_bytes when it\n> > was internal to tidstore, so that it never reported full when first\n> > created, so I guess that got thrown away when we got rid of the\n> > control object in shared memory. Forcing callers to clamp their own\n> > limits seems pretty unfriendly, though.\n>\n> Or we can have a new function for dsa.c to set the initial and max\n> segment size (or either one) to the existing DSA area so that\n> TidStoreCreate() can specify them at creation. In shared TidStore\n> cases, since all memory required by shared radix tree is allocated in\n> the passed-in DSA area and the memory usage is the total segment size\n> allocated in the DSA area, the user will have to prepare a DSA area\n> only for the shared tidstore. So we might be able to expect that the\n> DSA passed-in to TidStoreCreate() is empty and its segment sizes can\n> be adjustable.\n\nYet another idea is that TidStore creates its own DSA area in\nTidStoreCreate(). That is, In TidStoreCreate() we create a DSA area\n(using dsa_create()) and pass it to RT_CREATE(). Also, we need a new\nAPI to get the DSA area. The caller (e.g. parallel vacuum) gets the\ndsa_handle of the DSA and stores it in the shared memory (e.g. 
in\nPVShared). TidStoreAttach() will take two arguments: dsa_handle for\nthe DSA area and dsa_pointer for the shared radix tree. This idea\nstill requires controlling min/max segment sizes since dsa_create()\nuses the 1MB as the initial segment size. But the TidStoreCreate()\nwould be more user friendly.\n\nI've attached a PoC patch for discussion.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 21 Mar 2024 16:02:05 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 21, 2024 at 1:11 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> Or we can have a new function for dsa.c to set the initial and max\n> segment size (or either one) to the existing DSA area so that\n> TidStoreCreate() can specify them at creation.\n\nI didn't like this very much, because it's splitting an operation\nacross an API boundary. The caller already has all the information it\nneeds when it creates the DSA. Straw man proposal: it could do the\nsame for local memory, then they'd be more similar. But if we made\nlocal contexts the responsibility of the caller, that would cause\nduplication between creating and resetting.\n\n> In shared TidStore\n> cases, since all memory required by shared radix tree is allocated in\n> the passed-in DSA area and the memory usage is the total segment size\n> allocated in the DSA area\n\n...plus apparently some overhead, I just found out today, but that's\nbeside the point.\n\nOn Thu, Mar 21, 2024 at 2:02 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> Yet another idea is that TidStore creates its own DSA area in\n> TidStoreCreate(). That is, In TidStoreCreate() we create a DSA area\n> (using dsa_create()) and pass it to RT_CREATE(). Also, we need a new\n> API to get the DSA area. The caller (e.g. 
parallel vacuum) gets the\n> dsa_handle of the DSA and stores it in the shared memory (e.g. in\n> PVShared). TidStoreAttach() will take two arguments: dsa_handle for\n> the DSA area and dsa_pointer for the shared radix tree. This idea\n> still requires controlling min/max segment sizes since dsa_create()\n> uses the 1MB as the initial segment size. But the TidStoreCreate()\n> would be more user friendly.\n\nThis seems like an overall simplification, aside from future size\nconfiguration, so +1 to continue looking into this. If we go this\nroute, I'd like to avoid a boolean parameter and cleanly separate\nTidStoreCreateLocal() and TidStoreCreateShared(). Every operation\nafter that can introspect, but it's a bit awkward to force these cases\ninto the same function. It always was a little bit, but this change\nmakes it more so.\n\n\n", "msg_date": "Thu, 21 Mar 2024 14:35:08 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 21, 2024 at 4:35 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 21, 2024 at 1:11 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > Or we can have a new function for dsa.c to set the initial and max\n> > segment size (or either one) to the existing DSA area so that\n> > TidStoreCreate() can specify them at creation.\n>\n> I didn't like this very much, because it's splitting an operation\n> across an API boundary. The caller already has all the information it\n> needs when it creates the DSA. Straw man proposal: it could do the\n> same for local memory, then they'd be more similar. 
But if we made\n> local contexts the responsibility of the caller, that would cause\n> duplication between creating and resetting.\n\nFair point.\n\n>\n> > In shared TidStore\n> > cases, since all memory required by shared radix tree is allocated in\n> > the passed-in DSA area and the memory usage is the total segment size\n> > allocated in the DSA area\n>\n> ...plus apparently some overhead, I just found out today, but that's\n> beside the point.\n>\n> On Thu, Mar 21, 2024 at 2:02 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > Yet another idea is that TidStore creates its own DSA area in\n> > TidStoreCreate(). That is, In TidStoreCreate() we create a DSA area\n> > (using dsa_create()) and pass it to RT_CREATE(). Also, we need a new\n> > API to get the DSA area. The caller (e.g. parallel vacuum) gets the\n> > dsa_handle of the DSA and stores it in the shared memory (e.g. in\n> > PVShared). TidStoreAttach() will take two arguments: dsa_handle for\n> > the DSA area and dsa_pointer for the shared radix tree. This idea\n> > still requires controlling min/max segment sizes since dsa_create()\n> > uses the 1MB as the initial segment size. But the TidStoreCreate()\n> > would be more user friendly.\n>\n> This seems like an overall simplification, aside from future size\n> configuration, so +1 to continue looking into this. If we go this\n> route, I'd like to avoid a boolean parameter and cleanly separate\n> TidStoreCreateLocal() and TidStoreCreateShared(). Every operation\n> after that can introspect, but it's a bit awkward to force these cases\n> into the same function. It always was a little bit, but this change\n> makes it more so.\n\nI've looked into this idea further. 
Overall, it looks clean and I\ndon't see any problem so far in terms of integration with lazy vacuum.\nI've attached three patches for discussion and tests.\n\n- 0001 patch makes lazy vacuum use of tidstore.\n- 0002 patch makes DSA init/max segment size configurable (borrowed\nfrom another thread).\n- 0003 patch makes TidStore create its own DSA area with init/max DSA\nsegment adjustment (PoC patch).\n\nOne thing unclear to me is that this idea will be usable even when we\nwant to use the tidstore for parallel bitmap scan. Currently, we\ncreate a shared tidbitmap on a DSA area in ParallelExecutorInfo. This\nDSA area is used not only for tidbitmap but also for parallel hash\netc. If the tidstore created its own DSA area, parallel bitmap scan\nwould have to use the tidstore's DSA in addition to the DSA area in\nParallelExecutorInfo. I'm not sure if there are some differences\nbetween these usages in terms of resource manager etc. It seems no\nproblem but I might be missing something.\n\nRegards,\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 21 Mar 2024 18:02:47 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 21, 2024 at 4:03 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> I've looked into this idea further. 
Overall, it looks clean and I\n> don't see any problem so far in terms of integration with lazy vacuum.\n> I've attached three patches for discussion and tests.\n\nSeems okay in the big picture, it's the details we need to be careful of.\n\nv77-0001\n\n- dead_items = (VacDeadItems *) palloc(vac_max_items_to_alloc_size(max_items));\n- dead_items->max_items = max_items;\n- dead_items->num_items = 0;\n+ vacrel->dead_items = TidStoreCreate(vac_work_mem, NULL, 0);\n+\n+ dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));\n+ dead_items_info->max_bytes = vac_work_mem * 1024L;\n\nThis is confusing enough that it looks like a bug:\n\n[inside TidStoreCreate()]\n/* choose the maxBlockSize to be no larger than 1/16 of max_bytes */\nwhile (16 * maxBlockSize > max_bytes * 1024L)\nmaxBlockSize >>= 1;\n\nThis was copied from CreateWorkExprContext, which operates directly on\nwork_mem -- if the parameter is actually bytes, we can't \"* 1024\"\nhere. If we're passing something measured in kilobytes, the parameter\nis badly named. Let's use convert once and use bytes everywhere.\n\nNote: This was not another pass over the whole vacuum patch, just\nlooking an the issue at hand.\nAlso for later: Dilip Kumar reviewed an earlier version.\n\nv77-0002:\n\n+#define dsa_create(tranch_id) \\\n+ dsa_create_ext(tranch_id, DSA_INITIAL_SEGMENT_SIZE, DSA_MAX_SEGMENT_SIZE)\n\nSince these macros are now referring to defaults, maybe their name\nshould reflect that. Something like DSA_DEFAULT_INIT_SEGMENT_SIZE\n(*_MAX_*)\n\n+/* The minimum size of a DSM segment. */\n+#define DSA_MIN_SEGMENT_SIZE ((size_t) 1024)\n\nThat's a *lot* smaller than it is now. Maybe 256kB? 
We just want 1MB\nm_w_m to work correctly.\n\nv77-0003:\n\n+/* Public APIs to create local or shared TidStore */\n+\n+TidStore *\n+TidStoreCreateLocal(size_t max_bytes)\n+{\n+ return tidstore_create_internal(max_bytes, false, 0);\n+}\n+\n+TidStore *\n+TidStoreCreateShared(size_t max_bytes, int tranche_id)\n+{\n+ return tidstore_create_internal(max_bytes, true, tranche_id);\n+}\n\nI don't think these operations have enough in common to justify\nsharing even an internal implementation. Choosing aset block size is\ndone for both memory types, but it's pointless to do it for shared\nmemory, because the local context is then only used for small\nmetadata.\n\n+ /*\n+ * Choose the DSA initial and max segment sizes to be no longer than\n+ * 1/16 and 1/8 of max_bytes, respectively.\n+ */\n\nI'm guessing the 1/8 here because the number of segments is limited? I\nknow these numbers are somewhat arbitrary, but readers will wonder why\none has 1/8 and the other has 1/16.\n\n+ if (dsa_init_size < DSA_MIN_SEGMENT_SIZE)\n+ dsa_init_size = DSA_MIN_SEGMENT_SIZE;\n+ if (dsa_max_size < DSA_MAX_SEGMENT_SIZE)\n+ dsa_max_size = DSA_MAX_SEGMENT_SIZE;\n\nThe second clamp seems against the whole point of this patch -- it\nseems they should all be clamped bigger than the DSA_MIN_SEGMENT_SIZE?\nDid you try it with 1MB m_w_m?\n\n\n", "msg_date": "Thu, 21 Mar 2024 17:48:35 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 21, 2024 at 7:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 21, 2024 at 4:03 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > I've looked into this idea further. 
Overall, it looks clean and I\n> > don't see any problem so far in terms of integration with lazy vacuum.\n> > I've attached three patches for discussion and tests.\n>\n> Seems okay in the big picture, it's the details we need to be careful of.\n>\n> v77-0001\n>\n> - dead_items = (VacDeadItems *) palloc(vac_max_items_to_alloc_size(max_items));\n> - dead_items->max_items = max_items;\n> - dead_items->num_items = 0;\n> + vacrel->dead_items = TidStoreCreate(vac_work_mem, NULL, 0);\n> +\n> + dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));\n> + dead_items_info->max_bytes = vac_work_mem * 1024L;\n>\n> This is confusing enough that it looks like a bug:\n>\n> [inside TidStoreCreate()]\n> /* choose the maxBlockSize to be no larger than 1/16 of max_bytes */\n> while (16 * maxBlockSize > max_bytes * 1024L)\n> maxBlockSize >>= 1;\n>\n> This was copied from CreateWorkExprContext, which operates directly on\n> work_mem -- if the parameter is actually bytes, we can't \"* 1024\"\n> here. If we're passing something measured in kilobytes, the parameter\n> is badly named. Let's use convert once and use bytes everywhere.\n\nTrue. The attached 0001 patch fixes it.\n\n>\n> v77-0002:\n>\n> +#define dsa_create(tranch_id) \\\n> + dsa_create_ext(tranch_id, DSA_INITIAL_SEGMENT_SIZE, DSA_MAX_SEGMENT_SIZE)\n>\n> Since these macros are now referring to defaults, maybe their name\n> should reflect that. Something like DSA_DEFAULT_INIT_SEGMENT_SIZE\n> (*_MAX_*)\n\nIt makes sense to rename DSA_INITIAL_SEGMENT_SIZE , but I think that\nthe DSA_MAX_SEGMENT_SIZE is the theoretical maximum size, the current\nname also makes sense to me.\n\n>\n> +/* The minimum size of a DSM segment. */\n> +#define DSA_MIN_SEGMENT_SIZE ((size_t) 1024)\n>\n> That's a *lot* smaller than it is now. Maybe 256kB? 
We just want 1MB\n> m_w_m to work correctly.\n\nFixed.\n\n>\n> v77-0003:\n>\n> +/* Public APIs to create local or shared TidStore */\n> +\n> +TidStore *\n> +TidStoreCreateLocal(size_t max_bytes)\n> +{\n> + return tidstore_create_internal(max_bytes, false, 0);\n> +}\n> +\n> +TidStore *\n> +TidStoreCreateShared(size_t max_bytes, int tranche_id)\n> +{\n> + return tidstore_create_internal(max_bytes, true, tranche_id);\n> +}\n>\n> I don't think these operations have enough in common to justify\n> sharing even an internal implementation. Choosing aset block size is\n> done for both memory types, but it's pointless to do it for shared\n> memory, because the local context is then only used for small\n> metadata.\n>\n> + /*\n> + * Choose the DSA initial and max segment sizes to be no longer than\n> + * 1/16 and 1/8 of max_bytes, respectively.\n> + */\n>\n> I'm guessing the 1/8 here because the number of segments is limited? I\n> know these numbers are somewhat arbitrary, but readers will wonder why\n> one has 1/8 and the other has 1/16.\n>\n> + if (dsa_init_size < DSA_MIN_SEGMENT_SIZE)\n> + dsa_init_size = DSA_MIN_SEGMENT_SIZE;\n> + if (dsa_max_size < DSA_MAX_SEGMENT_SIZE)\n> + dsa_max_size = DSA_MAX_SEGMENT_SIZE;\n>\n> The second clamp seems against the whole point of this patch -- it\n> seems they should all be clamped bigger than the DSA_MIN_SEGMENT_SIZE?\n> Did you try it with 1MB m_w_m?\n\nI've incorporated the above comments and test results look good to me.\n\nI've attached the several patches:\n\n- 0002 is a minor fix for tidstore I found.\n- 0005 changes the create APIs of tidstore.\n- 0006 update the vacuum improvement patch to use the new\nTidStoreCreateLocal/Shared() APIs.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Fri, 22 Mar 2024 14:19:44 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { 
"msg_contents": "John Naylor <johncnaylorls@gmail.com> writes:\n> Done. I pushed this with a few last-minute cosmetic adjustments. This\n> has been a very long time coming, but we're finally in the home\n> stretch!\n\nI'm not sure why it took a couple weeks for Coverity to notice\nee1b30f12, but it saw it today, and it's not happy:\n\n/srv/coverity/git/pgsql-git/postgresql/src/include/lib/radixtree.h: 1621 in local_ts_extend_down()\n1615 \t\tnode = child;\n1616 \t\tshift -= RT_SPAN;\n1617 \t}\n1618 \n1619 \t/* Reserve slot for the value. */\n1620 \tn4 = (RT_NODE_4 *) node.local;\n>>> CID 1594658: Integer handling issues (BAD_SHIFT)\n>>> In expression \"key >> shift\", shifting by a negative amount has undefined behavior. The shift amount, \"shift\", is as little as -7.\n1621 \tn4->chunks[0] = RT_GET_KEY_CHUNK(key, shift);\n1622 \tn4->base.count = 1;\n1623 \n1624 \treturn &n4->children[0];\n1625 }\n1626 \n\nI think the point here is that if you start with an arbitrary\nnon-negative shift value, the preceding loop may in fact decrement it\ndown to something less than zero before exiting, in which case we\nwould indeed have trouble. I suspect that the code is making\nundocumented assumptions about the possible initial values of shift.\nMaybe some Asserts would be good? Also, if we're effectively assuming\nthat shift must be exactly zero here, why not let the compiler\nhard-code that?\n\n- \tn4->chunks[0] = RT_GET_KEY_CHUNK(key, shift);\n+ \tn4->chunks[0] = RT_GET_KEY_CHUNK(key, 0);\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 24 Mar 2024 12:53:36 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 25, 2024 at 1:53 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> John Naylor <johncnaylorls@gmail.com> writes:\n> > Done. I pushed this with a few last-minute cosmetic adjustments. 
This\n> > has been a very long time coming, but we're finally in the home\n> > stretch!\n\nThank you for the report.\n\n>\n> I'm not sure why it took a couple weeks for Coverity to notice\n> ee1b30f12, but it saw it today, and it's not happy:\n\nHmm, I've also done Coverity Scan in development but I wasn't able to\nsee this one for some reason...\n\n>\n> /srv/coverity/git/pgsql-git/postgresql/src/include/lib/radixtree.h: 1621 in local_ts_extend_down()\n> 1615 node = child;\n> 1616 shift -= RT_SPAN;\n> 1617 }\n> 1618\n> 1619 /* Reserve slot for the value. */\n> 1620 n4 = (RT_NODE_4 *) node.local;\n> >>> CID 1594658: Integer handling issues (BAD_SHIFT)\n> >>> In expression \"key >> shift\", shifting by a negative amount has undefined behavior. The shift amount, \"shift\", is as little as -7.\n> 1621 n4->chunks[0] = RT_GET_KEY_CHUNK(key, shift);\n> 1622 n4->base.count = 1;\n> 1623\n> 1624 return &n4->children[0];\n> 1625 }\n> 1626\n>\n> I think the point here is that if you start with an arbitrary\n> non-negative shift value, the preceding loop may in fact decrement it\n> down to something less than zero before exiting, in which case we\n> would indeed have trouble. I suspect that the code is making\n> undocumented assumptions about the possible initial values of shift.\n> Maybe some Asserts would be good? Also, if we're effectively assuming\n> that shift must be exactly zero here, why not let the compiler\n> hard-code that?\n>\n> - n4->chunks[0] = RT_GET_KEY_CHUNK(key, shift);\n> + n4->chunks[0] = RT_GET_KEY_CHUNK(key, 0);\n\nSounds like a good solution. 
I've attached the patch for that.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 25 Mar 2024 10:02:18 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Masahiko Sawada <sawada.mshk@gmail.com> writes:\n> On Mon, Mar 25, 2024 at 1:53 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>> I think the point here is that if you start with an arbitrary\n>> non-negative shift value, the preceding loop may in fact decrement it\n>> down to something less than zero before exiting, in which case we\n>> would indeed have trouble. I suspect that the code is making\n>> undocumented assumptions about the possible initial values of shift.\n>> Maybe some Asserts would be good? Also, if we're effectively assuming\n>> that shift must be exactly zero here, why not let the compiler\n>> hard-code that?\n\n> Sounds like a good solution. I've attached the patch for that.\n\nPersonally I'd put the Assert immediately after the loop, because\nit's not related to the \"Reserve slot for the value\" comment.\nSeems reasonable otherwise.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 24 Mar 2024 21:13:24 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 25, 2024 at 8:02 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Mar 25, 2024 at 1:53 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >\n> > I'm not sure why it took a couple weeks for Coverity to notice\n> > ee1b30f12, but it saw it today, and it's not happy:\n>\n> Hmm, I've also done Coverity Scan in development but I wasn't able to\n> see this one for some reason...\n\nHmm, before 30e144287 this code only ran in a test module, is it\npossible Coverity would not find it there?\n\n\n", "msg_date": "Mon, 25 Mar 2024 08:14:06 +0700", 
"msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "John Naylor <johncnaylorls@gmail.com> writes:\n> Hmm, before 30e144287 this code only ran in a test module, is it\n> possible Coverity would not find it there?\n\nThat could indeed explain why Coverity didn't see it. I'm not\nsure how our community run is set up, but it may not build the\ntest modules.\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Sun, 24 Mar 2024 21:27:18 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 25, 2024 at 10:13 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n>\n> Masahiko Sawada <sawada.mshk@gmail.com> writes:\n> > On Mon, Mar 25, 2024 at 1:53 AM Tom Lane <tgl@sss.pgh.pa.us> wrote:\n> >> I think the point here is that if you start with an arbitrary\n> >> non-negative shift value, the preceding loop may in fact decrement it\n> >> down to something less than zero before exiting, in which case we\n> >> would indeed have trouble. I suspect that the code is making\n> >> undocumented assumptions about the possible initial values of shift.\n> >> Maybe some Asserts would be good? Also, if we're effectively assuming\n> >> that shift must be exactly zero here, why not let the compiler\n> >> hard-code that?\n>\n> > Sounds like a good solution. I've attached the patch for that.\n>\n> Personally I'd put the Assert immediately after the loop, because\n> it's not related to the \"Reserve slot for the value\" comment.\n> Seems reasonable otherwise.\n>\n\nThanks. 
Pushed the fix after moving the Assert.\n\n\nRegards,\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Mon, 25 Mar 2024 12:13:19 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 22, 2024 at 12:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Mar 21, 2024 at 7:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > v77-0001\n> >\n> > - dead_items = (VacDeadItems *) palloc(vac_max_items_to_alloc_size(max_items));\n> > - dead_items->max_items = max_items;\n> > - dead_items->num_items = 0;\n> > + vacrel->dead_items = TidStoreCreate(vac_work_mem, NULL, 0);\n> > +\n> > + dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));\n> > + dead_items_info->max_bytes = vac_work_mem * 1024L;\n> >\n> > This is confusing enough that it looks like a bug:\n> >\n> > [inside TidStoreCreate()]\n> > /* choose the maxBlockSize to be no larger than 1/16 of max_bytes */\n> > while (16 * maxBlockSize > max_bytes * 1024L)\n> > maxBlockSize >>= 1;\n> >\n> > This was copied from CreateWorkExprContext, which operates directly on\n> > work_mem -- if the parameter is actually bytes, we can't \"* 1024\"\n> > here. If we're passing something measured in kilobytes, the parameter\n> > is badly named. Let's use convert once and use bytes everywhere.\n>\n> True. The attached 0001 patch fixes it.\n\nv78-0001 and 02 are fine, but for 0003 there is a consequence that I\ndidn't see mentioned: vac_work_mem now refers to bytes, where before\nit referred to kilobytes. It seems pretty confusing to use a different\nconvention from elsewhere, especially if it has the same name but\ndifferent meaning across versions. Worse, this change is buried inside\na moving-stuff-around diff, making it hard to see. 
Maybe \"convert only\nonce\" is still possible, but I was actually thinking of\n\n+ dead_items_info->max_bytes = vac_work_mem * 1024L;\n+ vacrel->dead_items = TidStoreCreate(dead_items_info->max_bytes, NULL, 0);\n\nThat way it's pretty obvious that it's correct. That may require a bit\nof duplication and moving around for shmem, but there is some of that\nalready.\n\nMore on 0003:\n\n- * The major space usage for vacuuming is storage for the array of dead TIDs\n+ * The major space usage for vacuuming is TidStore, a storage for dead TIDs\n\n+ * autovacuum_work_mem) memory space to keep track of dead TIDs. If the\n+ * TidStore is full, we must call lazy_vacuum to vacuum indexes (and to vacuum\n\nI wonder if the comments here should refer to it using a more natural\nspelling, like \"TID store\".\n\n- * items in the dead_items array for later vacuuming, count live and\n+ * items in the dead_items for later vacuuming, count live and\n\nMaybe \"the dead_items area\", or \"the dead_items store\" or \"in dead_items\"?\n\n- * remaining LP_DEAD line pointers on the page in the dead_items\n- * array. 
These dead items include those pruned by lazy_scan_prune()\n- * as well we line pointers previously marked LP_DEAD.\n+ * remaining LP_DEAD line pointers on the page in the dead_items.\n+ * These dead items include those pruned by lazy_scan_prune() as well\n+ * we line pointers previously marked LP_DEAD.\n\nHere maybe \"into dead_items\".\n\nAlso, \"we line pointers\" seems to be a pre-existing typo.\n\n- (errmsg(\"table \\\"%s\\\": removed %lld dead item identifiers in %u pages\",\n- vacrel->relname, (long long) index, vacuumed_pages)));\n+ (errmsg(\"table \\\"%s\\\": removed \" INT64_FORMAT \"dead item identifiers\nin %u pages\",\n+ vacrel->relname, vacrel->dead_items_info->num_items, vacuumed_pages)));\n\nThis is a translated message, so let's keep the message the same.\n\n/*\n * Allocate dead_items (either using palloc, or in dynamic shared memory).\n * Sets dead_items in vacrel for caller.\n *\n * Also handles parallel initialization as part of allocating dead_items in\n * DSM when required.\n */\nstatic void\ndead_items_alloc(LVRelState *vacrel, int nworkers)\n\nThis comment didn't change at all. It's not wrong, but let's consider\nupdating the specifics.\n\nv78-0004:\n\n> > +#define dsa_create(tranch_id) \\\n> > + dsa_create_ext(tranch_id, DSA_INITIAL_SEGMENT_SIZE, DSA_MAX_SEGMENT_SIZE)\n> >\n> > Since these macros are now referring to defaults, maybe their name\n> > should reflect that. 
Something like DSA_DEFAULT_INIT_SEGMENT_SIZE\n> > (*_MAX_*)\n>\n> It makes sense to rename DSA_INITIAL_SEGMENT_SIZE , but I think that\n> the DSA_MAX_SEGMENT_SIZE is the theoretical maximum size, the current\n> name also makes sense to me.\n\nRight, that makes sense.\n\nv78-0005:\n\n\"Although commit XXX\nallowed specifying the initial and maximum DSA segment sizes, callers\nstill needed to clamp their own limits, which was not consistent and\nuser-friendly.\"\n\nPerhaps s/still needed/would have needed/ ..., since we're preventing\nthat necessity.\n\n> > Did you try it with 1MB m_w_m?\n>\n> I've incorporated the above comments and test results look good to me.\n\nCould you be more specific about what the test was?\nDoes it work with 1MB m_w_m?\n\n+ /*\n+ * Choose the initial and maximum DSA segment sizes to be no longer\n+ * than 1/16 and 1/8 of max_bytes, respectively. If the initial\n+ * segment size is low, we end up having many segments, which risks\n+ * exceeding the total number of segments the platform can have.\n\nThe second sentence is technically correct, but I'm not sure how it\nrelates to the code that follows.\n\n+ while (16 * dsa_init_size > max_bytes)\n+ dsa_init_size >>= 1;\n+ while (8 * dsa_max_size > max_bytes)\n+ dsa_max_size >>= 1;\n\nI'm not sure we need a separate loop for \"dsa_init_size\". Can we just have :\n\nwhile (8 * dsa_max_size > max_bytes)\n dsa_max_size >>= 1;\n\nif (dsa_max_size < DSA_MIN_SEGMENT_SIZE)\n dsa_max_size = DSA_MIN_SEGMENT_SIZE;\n\nif (dsa_init_size > dsa_max_size)\n dsa_init_size = dsa_max_size;\n\n@@ -113,13 +113,10 @@ static void\ntidstore_iter_extract_tids(TidStoreIter *iter, BlockNumber blkno,\n * CurrentMemoryContext at the time of this call. The TID storage, backed\n * by a radix tree, will live in its child memory context, rt_context. The\n * TidStore will be limited to (approximately) max_bytes total memory\n- * consumption. 
If the 'area' is non-NULL, the radix tree is created in the\n- * DSA area.\n- *\n- * The returned object is allocated in backend-local memory.\n+ * consumption.\n\nThe existing comment slipped past my radar, but max_bytes is not a\nlimit, it's a hint. Come to think of it, it never was a limit in the\nnormal sense, but in earlier patches it was the criteria for reporting\n\"I'm full\" when asked.\n\n void\n TidStoreDestroy(TidStore *ts)\n {\n- /* Destroy underlying radix tree */\n if (TidStoreIsShared(ts))\n+ {\n+ /* Destroy underlying radix tree */\n shared_ts_free(ts->tree.shared);\n+\n+ dsa_detach(ts->area);\n+ }\n else\n local_ts_free(ts->tree.local);\n\nIt's still destroyed in the local case, so not sure why this comment was moved?\n\nv78-0006:\n\n-#define PARALLEL_VACUUM_KEY_DEAD_ITEMS 2\n+/* 2 was PARALLEL_VACUUM_KEY_DEAD_ITEMS */\n\nI don't see any use in core outside this module -- maybe it's possible\nto renumber these?\n\n\n", "msg_date": "Mon, 25 Mar 2024 13:25:17 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 25, 2024 at 3:25 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Fri, Mar 22, 2024 at 12:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Thu, Mar 21, 2024 at 7:48 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > v77-0001\n> > >\n> > > - dead_items = (VacDeadItems *) palloc(vac_max_items_to_alloc_size(max_items));\n> > > - dead_items->max_items = max_items;\n> > > - dead_items->num_items = 0;\n> > > + vacrel->dead_items = TidStoreCreate(vac_work_mem, NULL, 0);\n> > > +\n> > > + dead_items_info = (VacDeadItemsInfo *) palloc(sizeof(VacDeadItemsInfo));\n> > > + dead_items_info->max_bytes = vac_work_mem * 1024L;\n> > >\n> > > This is confusing enough that it looks like a bug:\n> > >\n> > > [inside TidStoreCreate()]\n> > > /* choose the maxBlockSize to be no larger than 1/16 of 
max_bytes */\n> > > while (16 * maxBlockSize > max_bytes * 1024L)\n> > > maxBlockSize >>= 1;\n> > >\n> > > This was copied from CreateWorkExprContext, which operates directly on\n> > > work_mem -- if the parameter is actually bytes, we can't \"* 1024\"\n> > > here. If we're passing something measured in kilobytes, the parameter\n> > > is badly named. Let's use convert once and use bytes everywhere.\n> >\n> > True. The attached 0001 patch fixes it.\n>\n> v78-0001 and 02 are fine, but for 0003 there is a consequence that I\n> didn't see mentioned:\n\nI think that the fix done in 0001 patch can be merged into 0003 patch.\n\n> vac_work_mem now refers to bytes, where before\n> it referred to kilobytes. It seems pretty confusing to use a different\n> convention from elsewhere, especially if it has the same name but\n> different meaning across versions. Worse, this change is buried inside\n> a moving-stuff-around diff, making it hard to see. Maybe \"convert only\n> once\" is still possible, but I was actually thinking of\n>\n> + dead_items_info->max_bytes = vac_work_mem * 1024L;\n> + vacrel->dead_items = TidStoreCreate(dead_items_info->max_bytes, NULL, 0);\n>\n> That way it's pretty obvious that it's correct. That may require a bit\n> of duplication and moving around for shmem, but there is some of that\n> already.\n\nAgreed.\n\n>\n> More on 0003:\n>\n> - * The major space usage for vacuuming is storage for the array of dead TIDs\n> + * The major space usage for vacuuming is TidStore, a storage for dead TIDs\n>\n> + * autovacuum_work_mem) memory space to keep track of dead TIDs. 
If the\n> + * TidStore is full, we must call lazy_vacuum to vacuum indexes (and to vacuum\n>\n> I wonder if the comments here should refer to it using a more natural\n> spelling, like \"TID store\".\n>\n> - * items in the dead_items array for later vacuuming, count live and\n> + * items in the dead_items for later vacuuming, count live and\n>\n> Maybe \"the dead_items area\", or \"the dead_items store\" or \"in dead_items\"?\n>\n> - * remaining LP_DEAD line pointers on the page in the dead_items\n> - * array. These dead items include those pruned by lazy_scan_prune()\n> - * as well we line pointers previously marked LP_DEAD.\n> + * remaining LP_DEAD line pointers on the page in the dead_items.\n> + * These dead items include those pruned by lazy_scan_prune() as well\n> + * we line pointers previously marked LP_DEAD.\n>\n> Here maybe \"into dead_items\".\n>\n> Also, \"we line pointers\" seems to be a pre-existing typo.\n>\n> - (errmsg(\"table \\\"%s\\\": removed %lld dead item identifiers in %u pages\",\n> - vacrel->relname, (long long) index, vacuumed_pages)));\n> + (errmsg(\"table \\\"%s\\\": removed \" INT64_FORMAT \"dead item identifiers\n> in %u pages\",\n> + vacrel->relname, vacrel->dead_items_info->num_items, vacuumed_pages)));\n>\n> This is a translated message, so let's keep the message the same.\n>\n> /*\n> * Allocate dead_items (either using palloc, or in dynamic shared memory).\n> * Sets dead_items in vacrel for caller.\n> *\n> * Also handles parallel initialization as part of allocating dead_items in\n> * DSM when required.\n> */\n> static void\n> dead_items_alloc(LVRelState *vacrel, int nworkers)\n>\n> This comment didn't change at all. 
It's not wrong, but let's consider\n> updating the specifics.\n\nFixed above comments.\n\n> v78-0005:\n>\n> \"Although commit XXX\n> allowed specifying the initial and maximum DSA segment sizes, callers\n> still needed to clamp their own limits, which was not consistent and\n> user-friendly.\"\n>\n> Perhaps s/still needed/would have needed/ ..., since we're preventing\n> that necessity.\n>\n> > > Did you try it with 1MB m_w_m?\n> >\n> > I've incorporated the above comments and test results look good to me.\n>\n> Could you be more specific about what the test was?\n> Does it work with 1MB m_w_m?\n\nIf m_w_m is 1MB, both the initial and maximum segment sizes are 256kB.\n\nFYI other test cases I tested were:\n\n* m_w_m = 2199023254528 (maximum value)\ninitial: 1MB\nmax: 128GB\n\n* m_w_m = 64MB (default)\ninitial: 1MB\nmax: 8MB\n\n>\n> + /*\n> + * Choose the initial and maximum DSA segment sizes to be no longer\n> + * than 1/16 and 1/8 of max_bytes, respectively. If the initial\n> + * segment size is low, we end up having many segments, which risks\n> + * exceeding the total number of segments the platform can have.\n>\n> The second sentence is technically correct, but I'm not sure how it\n> relates to the code that follows.\n>\n> + while (16 * dsa_init_size > max_bytes)\n> + dsa_init_size >>= 1;\n> + while (8 * dsa_max_size > max_bytes)\n> + dsa_max_size >>= 1;\n>\n> I'm not sure we need a separate loop for \"dsa_init_size\". Can we just have :\n>\n> while (8 * dsa_max_size > max_bytes)\n> dsa_max_size >>= 1;\n>\n> if (dsa_max_size < DSA_MIN_SEGMENT_SIZE)\n> dsa_max_size = DSA_MIN_SEGMENT_SIZE;\n>\n> if (dsa_init_size > dsa_max_size)\n> dsa_init_size = dsa_max_size;\n\nAgreed.\n\n>\n> @@ -113,13 +113,10 @@ static void\n> tidstore_iter_extract_tids(TidStoreIter *iter, BlockNumber blkno,\n> * CurrentMemoryContext at the time of this call. The TID storage, backed\n> * by a radix tree, will live in its child memory context, rt_context. 
The\n> * TidStore will be limited to (approximately) max_bytes total memory\n> - * consumption. If the 'area' is non-NULL, the radix tree is created in the\n> - * DSA area.\n> - *\n> - * The returned object is allocated in backend-local memory.\n> + * consumption.\n>\n> The existing comment slipped past my radar, but max_bytes is not a\n> limit, it's a hint. Come to think of it, it never was a limit in the\n> normal sense, but in earlier patches it was the criteria for reporting\n> \"I'm full\" when asked.\n\nUpdated the comment.\n\n>\n> void\n> TidStoreDestroy(TidStore *ts)\n> {\n> - /* Destroy underlying radix tree */\n> if (TidStoreIsShared(ts))\n> + {\n> + /* Destroy underlying radix tree */\n> shared_ts_free(ts->tree.shared);\n> +\n> + dsa_detach(ts->area);\n> + }\n> else\n> local_ts_free(ts->tree.local);\n>\n> It's still destroyed in the local case, so not sure why this comment was moved?\n>\n> v78-0006:\n>\n> -#define PARALLEL_VACUUM_KEY_DEAD_ITEMS 2\n> +/* 2 was PARALLEL_VACUUM_KEY_DEAD_ITEMS */\n>\n> I don't see any use in core outside this module -- maybe it's possible\n> to renumber these?\n\nFixed the above points.\n\nI've attached the latest patches. The 0004 and 0006 patches are\nupdates from the previous version.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 25 Mar 2024 22:06:46 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Mar 25, 2024 at 8:07 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Mar 25, 2024 at 3:25 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Fri, Mar 22, 2024 at 12:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n\n> > - * remaining LP_DEAD line pointers on the page in the dead_items\n> > - * array. 
These dead items include those pruned by lazy_scan_prune()\n> > - * as well we line pointers previously marked LP_DEAD.\n> > + * remaining LP_DEAD line pointers on the page in the dead_items.\n> > + * These dead items include those pruned by lazy_scan_prune() as well\n> > + * we line pointers previously marked LP_DEAD.\n> >\n> > Here maybe \"into dead_items\".\n\n- * remaining LP_DEAD line pointers on the page in the dead_items.\n+ * remaining LP_DEAD line pointers on the page into the dead_items.\n\nLet me explain. It used to be \"in the dead_items array.\" It is not an\narray anymore, so it was changed to \"in the dead_items\". dead_items is\na variable name, and names don't take \"the\". \"into dead_items\" seems\nmost natural to me, but there are other possible phrasings.\n\n> > > > Did you try it with 1MB m_w_m?\n> > >\n> > > I've incorporated the above comments and test results look good to me.\n> >\n> > Could you be more specific about what the test was?\n> > Does it work with 1MB m_w_m?\n>\n> If m_w_m is 1MB, both the initial and maximum segment sizes are 256kB.\n>\n> FYI other test cases I tested were:\n>\n> * m_w_m = 2199023254528 (maximum value)\n> initial: 1MB\n> max: 128GB\n>\n> * m_w_m = 64MB (default)\n> initial: 1MB\n> max: 8MB\n\nIf the test was a vacuum, how big a table was needed to hit 128GB?\n\n> > The existing comment slipped past my radar, but max_bytes is not a\n> > limit, it's a hint. Come to think of it, it never was a limit in the\n> > normal sense, but in earlier patches it was the criteria for reporting\n> > \"I'm full\" when asked.\n>\n> Updated the comment.\n\n+ * max_bytes is not a limit; it's used to choose the memory block sizes of\n+ * a memory context for TID storage in order for the total memory consumption\n+ * not to be overshot a lot. The caller can use the max_bytes as the criteria\n+ * for reporting whether it's full or not.\n\nThis is good information. 
I suggest this edit:\n\n\"max_bytes\" is not an internally-enforced limit; it is used only as a\nhint to cap the memory block size of the memory context for TID\nstorage. This reduces space wastage due to over-allocation. If the\ncaller wants to monitor memory usage, it must compare its limit with\nthe value reported by TidStoreMemoryUsage().\n\nOther comments:\n\nv79-0002 looks good to me.\n\nv79-0003:\n\n\"With this commit, when creating a shared TidStore, a dedicated DSA\narea is created for TID storage instead of using the provided DSA\narea.\"\n\nThis is very subtle, but \"the provided...\" implies there still is one.\n-> \"a provided...\"\n\n+ * Similar to TidStoreCreateLocal() but create a shared TidStore on a\n+ * DSA area. The TID storage will live in the DSA area, and a memory\n+ * context rt_context will have only meta data of the radix tree.\n\n-> \"the memory context\"\n\nI think you can go ahead and commit 0002 and 0003/4.\n\nv79-0005:\n\n- bypass = (vacrel->lpdead_item_pages < threshold &&\n- vacrel->lpdead_items < MAXDEADITEMS(32L * 1024L * 1024L));\n+ bypass = (vacrel->lpdead_item_pages < threshold) &&\n+ TidStoreMemoryUsage(vacrel->dead_items) < (32L * 1024L * 1024L);\n\nThe parentheses look strange, and the first line shouldn't change\nwithout a good reason.\n\n- /* Set dead_items space */\n- dead_items = (VacDeadItems *) shm_toc_lookup(toc,\n- PARALLEL_VACUUM_KEY_DEAD_ITEMS,\n- false);\n+ /* Set dead items */\n+ dead_items = TidStoreAttach(shared->dead_items_dsa_handle,\n+ shared->dead_items_handle);\n\nI feel ambivalent about this comment change. The original is not very\ndescriptive to begin with. 
If we need to change at all, maybe \"find\ndead_items in shared memory\"?\n\nv79-0005: As I said earlier, Dilip Kumar reviewed an earlier version.\n\nv79-0006:\n\nvac_work_mem should also go back to being an int.\n\n\n", "msg_date": "Wed, 27 Mar 2024 07:24:50 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 27, 2024 at 9:25 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Mon, Mar 25, 2024 at 8:07 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Mar 25, 2024 at 3:25 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > >\n> > > On Fri, Mar 22, 2024 at 12:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > > - * remaining LP_DEAD line pointers on the page in the dead_items\n> > > - * array. These dead items include those pruned by lazy_scan_prune()\n> > > - * as well we line pointers previously marked LP_DEAD.\n> > > + * remaining LP_DEAD line pointers on the page in the dead_items.\n> > > + * These dead items include those pruned by lazy_scan_prune() as well\n> > > + * we line pointers previously marked LP_DEAD.\n> > >\n> > > Here maybe \"into dead_items\".\n>\n> - * remaining LP_DEAD line pointers on the page in the dead_items.\n> + * remaining LP_DEAD line pointers on the page into the dead_items.\n>\n> Let me explain. It used to be \"in the dead_items array.\" It is not an\n> array anymore, so it was changed to \"in the dead_items\". dead_items is\n> a variable name, and names don't take \"the\". \"into dead_items\" seems\n> most natural to me, but there are other possible phrasings.\n\nThanks for the explanation. I was distracted. 
Fixed in the latest patch.\n\n>\n> > > > > Did you try it with 1MB m_w_m?\n> > > >\n> > > > I've incorporated the above comments and test results look good to me.\n> > >\n> > > Could you be more specific about what the test was?\n> > > Does it work with 1MB m_w_m?\n> >\n> > If m_w_m is 1MB, both the initial and maximum segment sizes are 256kB.\n> >\n> > FYI other test cases I tested were:\n> >\n> > * m_w_m = 2199023254528 (maximum value)\n> > initial: 1MB\n> > max: 128GB\n> >\n> > * m_w_m = 64MB (default)\n> > initial: 1MB\n> > max: 8MB\n>\n> If the test was a vacuum, how big a table was needed to hit 128GB?\n\nI just checked how TIdStoreCreateLocal() calculated the initial and\nmax segment sizes while changing m_w_m, so didn't check how big\nsegments are actually allocated in the maximum value test case.\n\n>\n> > > The existing comment slipped past my radar, but max_bytes is not a\n> > > limit, it's a hint. Come to think of it, it never was a limit in the\n> > > normal sense, but in earlier patches it was the criteria for reporting\n> > > \"I'm full\" when asked.\n> >\n> > Updated the comment.\n>\n> + * max_bytes is not a limit; it's used to choose the memory block sizes of\n> + * a memory context for TID storage in order for the total memory consumption\n> + * not to be overshot a lot. The caller can use the max_bytes as the criteria\n> + * for reporting whether it's full or not.\n>\n> This is good information. I suggest this edit:\n>\n> \"max_bytes\" is not an internally-enforced limit; it is used only as a\n> hint to cap the memory block size of the memory context for TID\n> storage. This reduces space wastage due to over-allocation. 
If the\n> caller wants to monitor memory usage, it must compare its limit with\n> the value reported by TidStoreMemoryUsage().\n>\n> Other comments:\n\nThanks for the suggestion!\n\n>\n> v79-0002 looks good to me.\n>\n> v79-0003:\n>\n> \"With this commit, when creating a shared TidStore, a dedicated DSA\n> area is created for TID storage instead of using the provided DSA\n> area.\"\n>\n> This is very subtle, but \"the provided...\" implies there still is one.\n> -> \"a provided...\"\n>\n> + * Similar to TidStoreCreateLocal() but create a shared TidStore on a\n> + * DSA area. The TID storage will live in the DSA area, and a memory\n> + * context rt_context will have only meta data of the radix tree.\n>\n> -> \"the memory context\"\n\nFixed in the latest patch.\n\n>\n> I think you can go ahead and commit 0002 and 0003/4.\n\nI've pushed the 0002 (dsa init and max segment size) patch, and will\npush the attached 0001 patch next.\n\n>\n> v79-0005:\n>\n> - bypass = (vacrel->lpdead_item_pages < threshold &&\n> - vacrel->lpdead_items < MAXDEADITEMS(32L * 1024L * 1024L));\n> + bypass = (vacrel->lpdead_item_pages < threshold) &&\n> + TidStoreMemoryUsage(vacrel->dead_items) < (32L * 1024L * 1024L);\n>\n> The parentheses look strange, and the first line shouldn't change\n> without a good reason.\n\nFixed.\n\n>\n> - /* Set dead_items space */\n> - dead_items = (VacDeadItems *) shm_toc_lookup(toc,\n> - PARALLEL_VACUUM_KEY_DEAD_ITEMS,\n> - false);\n> + /* Set dead items */\n> + dead_items = TidStoreAttach(shared->dead_items_dsa_handle,\n> + shared->dead_items_handle);\n>\n> I feel ambivalent about this comment change. The original is not very\n> descriptive to begin with. 
If we need to change at all, maybe \"find\n> dead_items in shared memory\"?\n\nAgreed.\n\n>\n> v79-0005: As I said earlier, Dilip Kumar reviewed an earlier version.\n>\n> v79-0006:\n>\n> vac_work_mem should also go back to being an int.\n\nFixed.\n\nI've attached the latest patches.\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Wed, 27 Mar 2024 17:43:37 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, Mar 27, 2024 at 5:43 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Wed, Mar 27, 2024 at 9:25 AM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Mon, Mar 25, 2024 at 8:07 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > On Mon, Mar 25, 2024 at 3:25 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > > >\n> > > > On Fri, Mar 22, 2024 at 12:20 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > > - * remaining LP_DEAD line pointers on the page in the dead_items\n> > > > - * array. These dead items include those pruned by lazy_scan_prune()\n> > > > - * as well we line pointers previously marked LP_DEAD.\n> > > > + * remaining LP_DEAD line pointers on the page in the dead_items.\n> > > > + * These dead items include those pruned by lazy_scan_prune() as well\n> > > > + * we line pointers previously marked LP_DEAD.\n> > > >\n> > > > Here maybe \"into dead_items\".\n> >\n> > - * remaining LP_DEAD line pointers on the page in the dead_items.\n> > + * remaining LP_DEAD line pointers on the page into the dead_items.\n> >\n> > Let me explain. It used to be \"in the dead_items array.\" It is not an\n> > array anymore, so it was changed to \"in the dead_items\". dead_items is\n> > a variable name, and names don't take \"the\". \"into dead_items\" seems\n> > most natural to me, but there are other possible phrasings.\n>\n> Thanks for the explanation. 
I was distracted. Fixed in the latest patch.\n>\n> >\n> > > > > > Did you try it with 1MB m_w_m?\n> > > > >\n> > > > > I've incorporated the above comments and test results look good to me.\n> > > >\n> > > > Could you be more specific about what the test was?\n> > > > Does it work with 1MB m_w_m?\n> > >\n> > > If m_w_m is 1MB, both the initial and maximum segment sizes are 256kB.\n> > >\n> > > FYI other test cases I tested were:\n> > >\n> > > * m_w_m = 2199023254528 (maximum value)\n> > > initial: 1MB\n> > > max: 128GB\n> > >\n> > > * m_w_m = 64MB (default)\n> > > initial: 1MB\n> > > max: 8MB\n> >\n> > If the test was a vacuum, how big a table was needed to hit 128GB?\n>\n> I just checked how TIdStoreCreateLocal() calculated the initial and\n> max segment sizes while changing m_w_m, so didn't check how big\n> segments are actually allocated in the maximum value test case.\n>\n> >\n> > > > The existing comment slipped past my radar, but max_bytes is not a\n> > > > limit, it's a hint. Come to think of it, it never was a limit in the\n> > > > normal sense, but in earlier patches it was the criteria for reporting\n> > > > \"I'm full\" when asked.\n> > >\n> > > Updated the comment.\n> >\n> > + * max_bytes is not a limit; it's used to choose the memory block sizes of\n> > + * a memory context for TID storage in order for the total memory consumption\n> > + * not to be overshot a lot. The caller can use the max_bytes as the criteria\n> > + * for reporting whether it's full or not.\n> >\n> > This is good information. I suggest this edit:\n> >\n> > \"max_bytes\" is not an internally-enforced limit; it is used only as a\n> > hint to cap the memory block size of the memory context for TID\n> > storage. This reduces space wastage due to over-allocation. 
If the\n> > caller wants to monitor memory usage, it must compare its limit with\n> > the value reported by TidStoreMemoryUsage().\n> >\n> > Other comments:\n>\n> Thanks for the suggestion!\n>\n> >\n> > v79-0002 looks good to me.\n> >\n> > v79-0003:\n> >\n> > \"With this commit, when creating a shared TidStore, a dedicated DSA\n> > area is created for TID storage instead of using the provided DSA\n> > area.\"\n> >\n> > This is very subtle, but \"the provided...\" implies there still is one.\n> > -> \"a provided...\"\n> >\n> > + * Similar to TidStoreCreateLocal() but create a shared TidStore on a\n> > + * DSA area. The TID storage will live in the DSA area, and a memory\n> > + * context rt_context will have only meta data of the radix tree.\n> >\n> > -> \"the memory context\"\n>\n> Fixed in the latest patch.\n>\n> >\n> > I think you can go ahead and commit 0002 and 0003/4.\n>\n> I've pushed the 0002 (dsa init and max segment size) patch, and will\n> push the attached 0001 patch next.\n\nPushed the refactoring patch.\n\nI've attached the rebased vacuum improvement patch for cfbot. I\nmentioned in the commit message that this patch eliminates the 1GB\nlimitation.\n\nI think the patch is in good shape. Do you have other comments or\nsuggestions, John?\n\nRegards,\n\n--\nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 28 Mar 2024 14:55:16 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 28, 2024 at 12:55 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> Pushed the refactoring patch.\n>\n> I've attached the rebased vacuum improvement patch for cfbot. I\n> mentioned in the commit message that this patch eliminates the 1GB\n> limitation.\n>\n> I think the patch is in good shape. 
Do you have other comments or\n> suggestions, John?\n\nI'll do another pass tomorrow, but first I wanted to get in another\nslightly-challenging in-situ test. On my humble laptop, I can still\nfit a table large enough to cause PG16 to choke on multiple rounds of\nindex cleanup:\n\ndrop table if exists test;\ncreate unlogged table test (a int, b uuid) with (autovacuum_enabled=false);\n\ninsert into test (a,b) select i, gen_random_uuid() from\ngenerate_series(1,1000*1000*1000) i;\n\ncreate index on test (a);\ncreate index on test (b);\n\ndelete from test;\n\nvacuum (verbose, truncate off, parallel 2) test;\n\nINFO: vacuuming \"john.public.test\"\nINFO: launched 1 parallel vacuum worker for index vacuuming (planned: 1)\nINFO: finished vacuuming \"john.public.test\": index scans: 1\npages: 0 removed, 6369427 remain, 6369427 scanned (100.00% of total)\ntuples: 999997174 removed, 2826 remain, 0 are dead but not yet removable\ntuples missed: 2826 dead from 18 pages not removed due to cleanup lock\ncontention\nremovable cutoff: 771, which was 0 XIDs old when operation ended\nnew relfrozenxid: 767, which is 4 XIDs ahead of previous value\nfrozen: 0 pages from table (0.00% of total) had 0 tuples frozen\nindex scan needed: 6369409 pages from table (100.00% of total) had\n999997174 dead item identifiers removed\nindex \"test_a_idx\": pages: 2741898 in total, 2741825 newly deleted,\n2741825 currently deleted, 0 reusable\nindex \"test_b_idx\": pages: 3850387 in total, 3842056 newly deleted,\n3842056 currently deleted, 0 reusable\navg read rate: 159.740 MB/s, avg write rate: 161.726 MB/s\nbuffer usage: 26367981 hits, 14958634 misses, 15144601 dirtied\nWAL usage: 3 records, 1 full page images, 2050 bytes\nsystem usage: CPU: user: 151.89 s, system: 193.54 s, elapsed: 731.59 s\n\nWatching pg_stat_progress_vacuum, dead_tuple_bytes got up to 398458880.\n\nAbout the \"tuples missed\" -- I didn't expect contention during this\ntest. 
I believe that's completely unrelated behavior, but wanted to\nmention it anyway, since I found it confusing.\n\n\n", "msg_date": "Thu, 28 Mar 2024 16:15:28 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 28, 2024 at 6:15 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 28, 2024 at 12:55 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > Pushed the refactoring patch.\n> >\n> > I've attached the rebased vacuum improvement patch for cfbot. I\n> > mentioned in the commit message that this patch eliminates the 1GB\n> > limitation.\n> >\n> > I think the patch is in good shape. Do you have other comments or\n> > suggestions, John?\n>\n> I'll do another pass tomorrow, but first I wanted to get in another\n> slightly-challenging in-situ test.\n\nThanks!\n\n>\n> About the \"tuples missed\" -- I didn't expect contention during this\n> test. I believe that's completely unrelated behavior, but wanted to\n> mention it anyway, since I found it confusing.\n\nI don't investigate it enough but bgwriter might be related to the contention.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Fri, 29 Mar 2024 15:05:36 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Mar 28, 2024 at 12:55 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> I think the patch is in good shape. Do you have other comments or\n> suggestions, John?\n\n--- a/doc/src/sgml/config.sgml\n+++ b/doc/src/sgml/config.sgml\n@@ -1918,11 +1918,6 @@ include_dir 'conf.d'\n too high. 
It may be useful to control for this by separately\n setting <xref linkend=\"guc-autovacuum-work-mem\"/>.\n </para>\n- <para>\n- Note that for the collection of dead tuple identifiers,\n- <command>VACUUM</command> is only able to utilize up to a maximum of\n- <literal>1GB</literal> of memory.\n- </para>\n </listitem>\n </varlistentry>\n\nThis is mentioned twice for two different GUCs -- need to remove the\nother one, too. Other than that, I just have minor nits:\n\n- * The major space usage for vacuuming is storage for the array of dead TIDs\n+ * The major space usage for vacuuming is TID store, a storage for dead TIDs\n\nI think I've helped edit this sentence before, but I still don't quite\nlike it. I'm thinking now \"is storage for the dead tuple IDs\".\n\n- * set upper bounds on the number of TIDs we can keep track of at once.\n+ * set upper bounds on the maximum memory that can be used for keeping track\n+ * of dead TIDs at once.\n\nI think \"maximum\" is redundant with \"upper bounds\".\n\nI also feel the commit message needs more \"meat\" -- we need to clearly\nnarrate the features and benefits. I've attached how I would write it,\nbut feel free to use what you like to match your taste.\n\nI've marked it Ready for Committer.", "msg_date": "Fri, 29 Mar 2024 14:21:32 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Fri, Mar 29, 2024 at 4:21 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Mar 28, 2024 at 12:55 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > I think the patch is in good shape. Do you have other comments or\n> > suggestions, John?\n>\n> --- a/doc/src/sgml/config.sgml\n> +++ b/doc/src/sgml/config.sgml\n> @@ -1918,11 +1918,6 @@ include_dir 'conf.d'\n> too high. 
It may be useful to control for this by separately\n> setting <xref linkend=\"guc-autovacuum-work-mem\"/>.\n> </para>\n> - <para>\n> - Note that for the collection of dead tuple identifiers,\n> - <command>VACUUM</command> is only able to utilize up to a maximum of\n> - <literal>1GB</literal> of memory.\n> - </para>\n> </listitem>\n> </varlistentry>\n>\n> This is mentioned twice for two different GUCs -- need to remove the\n> other one, too.\n\nGood catch, removed.\n\n> Other than that, I just have minor nits:\n>\n> - * The major space usage for vacuuming is storage for the array of dead TIDs\n> + * The major space usage for vacuuming is TID store, a storage for dead TIDs\n>\n> I think I've helped edit this sentence before, but I still don't quite\n> like it. I'm thinking now \"is storage for the dead tuple IDs\".\n>\n> - * set upper bounds on the number of TIDs we can keep track of at once.\n> + * set upper bounds on the maximum memory that can be used for keeping track\n> + * of dead TIDs at once.\n>\n> I think \"maximum\" is redundant with \"upper bounds\".\n\nFixed.\n\n>\n> I also feel the commit message needs more \"meat\" -- we need to clearly\n> narrate the features and benefits. I've attached how I would write it,\n> but feel free to use what you like to match your taste.\n\nWell, that's much better than mine.\n\n>\n> I've marked it Ready for Committer.\n\nThank you! I've attached the patch that I'm going to push tomorrow.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Mon, 1 Apr 2024 11:53:28 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Apr 1, 2024 at 9:54 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> Thank you! 
I've attached the patch that I'm going to push tomorrow.\n\nExcellent!\n\nI've attached a mostly-polished update on runtime embeddable values,\nstoring up to 3 offsets in the child pointer (1 on 32-bit platforms).\nAs discussed, this includes a macro to cap max possible offset that\ncan be stored in the bitmap, which I believe only reduces the valid\noffset range for 32kB pages on 32-bit platforms. Even there, it allows\nfor more line pointers than can possibly be useful. It also splits\ninto two parts for readability. It would be committed in two pieces as\nwell, since they are independently useful.", "msg_date": "Sun, 7 Apr 2024 09:08:40 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sun, Apr 7, 2024 at 9:08 AM John Naylor <johncnaylorls@gmail.com> wrote:\n> I've attached a mostly-polished update on runtime embeddable values,\n> storing up to 3 offsets in the child pointer (1 on 32-bit platforms).\n\nAnd...since there's a new bump context patch, I wanted to anticipate\nsqueezing an update on top of that, if that gets committed. 0004/5 are\nthe v6 bump context, and 0006 uses it for vacuum. The rest are to show\nit works -- the expected.out changes make possible problems in CI\neasier to see. The allocation size is 16 bytes, so this difference is\nentirely due to lack of chunk header:\n\naset: 6619136\nbump: 5047296\n\n(Note: assert builds still have the chunk header for sanity checking,\nso this was done in a more optimized build)", "msg_date": "Sun, 7 Apr 2024 16:44:56 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi,\n\nOn 2024-04-01 11:53:28 +0900, Masahiko Sawada wrote:\n> On Fri, Mar 29, 2024 at 4:21 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> > I've marked it Ready for Committer.\n>\n> Thank you! 
I've attached the patch that I'm going to push tomorrow.\n\nLocally I ran a 32bit build with ubsan enabled (by accident actually), which\ncomplains:\n\nperforming post-bootstrap initialization ...\n----------------------------------- stderr -----------------------------------\n../../../../../home/andres/src/postgresql/src/backend/access/common/tidstore.c:341:24: runtime error: member access within misaligned address 0xffb6258e for type 'struct BlocktableEntry', which requires 4 byte alignment\n0xffb6258e: note: pointer points here\n 00 00 02 00 01 40 dc e9 83 0b 80 48 70 ee 00 00 00 00 00 00 00 01 17 00 00 00 f8 d4 a6 ee e8 25\n ^\n #0 0x814097e in TidStoreSetBlockOffsets ../../../../../home/andres/src/postgresql/src/backend/access/common/tidstore.c:341\n #1 0x826560a in dead_items_add ../../../../../home/andres/src/postgresql/src/backend/access/heap/vacuumlazy.c:2889\n #2 0x825f8da in lazy_scan_prune ../../../../../home/andres/src/postgresql/src/backend/access/heap/vacuumlazy.c:1502\n #3 0x825da71 in lazy_scan_heap ../../../../../home/andres/src/postgresql/src/backend/access/heap/vacuumlazy.c:977\n #4 0x825ad8f in heap_vacuum_rel ../../../../../home/andres/src/postgresql/src/backend/access/heap/vacuumlazy.c:499\n #5 0x8697e97 in table_relation_vacuum ../../../../../home/andres/src/postgresql/src/include/access/tableam.h:1725\n #6 0x869fca6 in vacuum_rel ../../../../../home/andres/src/postgresql/src/backend/commands/vacuum.c:2206\n #7 0x869a0fd in vacuum ../../../../../home/andres/src/postgresql/src/backend/commands/vacuum.c:622\n #8 0x869986b in ExecVacuum ../../../../../home/andres/src/postgresql/src/backend/commands/vacuum.c:449\n #9 0x8e5f832 in standard_ProcessUtility ../../../../../home/andres/src/postgresql/src/backend/tcop/utility.c:859\n #10 0x8e5e5f6 in ProcessUtility ../../../../../home/andres/src/postgresql/src/backend/tcop/utility.c:523\n #11 0x8e5b71a in PortalRunUtility ../../../../../home/andres/src/postgresql/src/backend/tcop/pquery.c:1158\n #12 
0x8e5be80 in PortalRunMulti ../../../../../home/andres/src/postgresql/src/backend/tcop/pquery.c:1315\n #13 0x8e59f9b in PortalRun ../../../../../home/andres/src/postgresql/src/backend/tcop/pquery.c:791\n #14 0x8e4d5f3 in exec_simple_query ../../../../../home/andres/src/postgresql/src/backend/tcop/postgres.c:1274\n #15 0x8e55159 in PostgresMain ../../../../../home/andres/src/postgresql/src/backend/tcop/postgres.c:4680\n #16 0x8e54445 in PostgresSingleUserMain ../../../../../home/andres/src/postgresql/src/backend/tcop/postgres.c:4136\n #17 0x88bb55e in main ../../../../../home/andres/src/postgresql/src/backend/main/main.c:194\n #18 0xf76f47c4 (/lib/i386-linux-gnu/libc.so.6+0x237c4) (BuildId: fe79efe6681a919714a4e119da2baac3a4953fbf)\n #19 0xf76f4887 in __libc_start_main (/lib/i386-linux-gnu/libc.so.6+0x23887) (BuildId: fe79efe6681a919714a4e119da2baac3a4953fbf)\n #20 0x80d40f7 in _start (/srv/dev/build/postgres/m-dev-assert-32/tmp_install/srv/dev/install/postgres/m-dev-assert-32/bin/postgres+0x80d40f7)\n\nSUMMARY: UndefinedBehaviorSanitizer: undefined-behavior ../../../../../home/andres/src/postgresql/src/backend/access/common/tidstore.c:341:24 in\nAborted (core dumped)\nchild process exited with exit code 134\ninitdb: data directory \"/srv/dev/build/postgres/m-dev-assert-32/tmp_install/initdb-template\" not removed at user's request\n\n\nAt first I was confused why CI didn't find this. 
Turns out that, for me, this\nis only triggered without compiler optimizations, and I had used -O0 while CI\nuses some optimizations.\n\nBacktrace:\n#9 0x0814097f in TidStoreSetBlockOffsets (ts=0xb8dfde4, blkno=15, offsets=0xffb6275c, num_offsets=11)\n at ../../../../../home/andres/src/postgresql/src/backend/access/common/tidstore.c:341\n#10 0x0826560b in dead_items_add (vacrel=0xb8df6d4, blkno=15, offsets=0xffb6275c, num_offsets=11)\n at ../../../../../home/andres/src/postgresql/src/backend/access/heap/vacuumlazy.c:2889\n#11 0x0825f8db in lazy_scan_prune (vacrel=0xb8df6d4, buf=24, blkno=15, page=0xeeb6c000 \"\", vmbuffer=729, all_visible_according_to_vm=false,\n has_lpdead_items=0xffb62a1f) at ../../../../../home/andres/src/postgresql/src/backend/access/heap/vacuumlazy.c:1502\n#12 0x0825da72 in lazy_scan_heap (vacrel=0xb8df6d4) at ../../../../../home/andres/src/postgresql/src/backend/access/heap/vacuumlazy.c:977\n#13 0x0825ad90 in heap_vacuum_rel (rel=0xb872810, params=0xffb62e90, bstrategy=0xb99d5e0)\n at ../../../../../home/andres/src/postgresql/src/backend/access/heap/vacuumlazy.c:499\n#14 0x08697e98 in table_relation_vacuum (rel=0xb872810, params=0xffb62e90, bstrategy=0xb99d5e0)\n at ../../../../../home/andres/src/postgresql/src/include/access/tableam.h:1725\n#15 0x0869fca7 in vacuum_rel (relid=1249, relation=0x0, params=0xffb62e90, bstrategy=0xb99d5e0)\n at ../../../../../home/andres/src/postgresql/src/backend/commands/vacuum.c:2206\n#16 0x0869a0fe in vacuum (relations=0xb99de08, params=0xffb62e90, bstrategy=0xb99d5e0, vac_context=0xb99d550, isTopLevel=true)\n\n(gdb) p/x page\n$1 = 0xffb6258e\n\n\nI think compiler optimizations are only tangentially involved here, they\ntrigger the stack frame layout to change, e.g. 
because some variable will just\nexist in a register.\n\n\nLooking at the code, the failure isn't suprising anymore:\n\tchar\t\tdata[MaxBlocktableEntrySize];\n\tBlocktableEntry *page = (BlocktableEntry *) data;\n\n'char' doesn't enforce any alignment, but you're storing a BlocktableEntry in\na char[]. You can't just do that. Look at how we do that for\ne.g. PGAlignedblock.\n\n\nWith the attached minimal fix, the tests pass again.\n\nGreetings,\n\nAndres Freund", "msg_date": "Sun, 7 Apr 2024 12:07:31 -0700", "msg_from": "Andres Freund <andres@anarazel.de>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Apr 8, 2024 at 2:07 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Looking at the code, the failure isn't suprising anymore:\n> char data[MaxBlocktableEntrySize];\n> BlocktableEntry *page = (BlocktableEntry *) data;\n>\n> 'char' doesn't enforce any alignment, but you're storing a BlocktableEntry in\n> a char[]. You can't just do that. Look at how we do that for\n> e.g. PGAlignedblock.\n>\n>\n> With the attached minimal fix, the tests pass again.\n\nThanks, will push this shortly!\n\n\n", "msg_date": "Mon, 8 Apr 2024 06:13:04 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "Hi, John!\n\nOn Mon, 8 Apr 2024 at 03:13, John Naylor <johncnaylorls@gmail.com> wrote:\n\n> On Mon, Apr 8, 2024 at 2:07 AM Andres Freund <andres@anarazel.de> wrote:\n> >\n> > Looking at the code, the failure isn't suprising anymore:\n> > char data[MaxBlocktableEntrySize];\n> > BlocktableEntry *page = (BlocktableEntry *) data;\n> >\n> > 'char' doesn't enforce any alignment, but you're storing a\n> BlocktableEntry in\n> > a char[]. You can't just do that. Look at how we do that for\n> > e.g. 
PGAlignedblock.\n> >\n> >\n> > With the attached minimal fix, the tests pass again.\n>\n> Thanks, will push this shortly!\n>\nBuildfarm animal mylodon looks unhappy with this:\n\nFAILED: src/backend/postgres_lib.a.p/access_common_tidstore.c.o\nccache clang-14 -Isrc/backend/postgres_lib.a.p -Isrc/include\n-I../pgsql/src/include -I/usr/include/libxml2 -I/usr/include/security\n-fdiagnostics-color=never -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch\n-O2 -g -fno-strict-aliasing -fwrapv -D_GNU_SOURCE -Wmissing-prototypes\n-Wpointer-arith -Werror=vla -Werror=unguarded-availability-new\n-Wendif-labels -Wmissing-format-attribute -Wcast-function-type\n-Wformat-security -Wdeclaration-after-statement\n-Wno-unused-command-line-argument -Wno-compound-token-split-by-macro\n-O1 -ggdb -g3 -fno-omit-frame-pointer -Wall -Wextra\n-Wno-unused-parameter -Wno-sign-compare\n-Wno-missing-field-initializers -Wno-array-bounds -std=c99\n-Wc11-extensions -Werror=c11-extensions -fPIC -isystem\n/usr/include/mit-krb5 -pthread -DBUILDING_DLL -MD -MQ\nsrc/backend/postgres_lib.a.p/access_common_tidstore.c.o -MF\nsrc/backend/postgres_lib.a.p/access_common_tidstore.c.o.d -o\nsrc/backend/postgres_lib.a.p/access_common_tidstore.c.o -c\n../pgsql/src/backend/access/common/tidstore.c\n../pgsql/src/backend/access/common/tidstore.c:48:3: error: anonymous\nstructs are a C11 extension [-Werror,-Wc11-extensions]\n struct\n ^\n\n1 error generated.\n\nRegards,\nPavel Borisov\nSupabase\n\nHi, John!On Mon, 8 Apr 2024 at 03:13, John Naylor <johncnaylorls@gmail.com> wrote:On Mon, Apr 8, 2024 at 2:07 AM Andres Freund <andres@anarazel.de> wrote:\n>\n> Looking at the code, the failure isn't suprising anymore:\n>         char            data[MaxBlocktableEntrySize];\n>         BlocktableEntry *page = (BlocktableEntry *) data;\n>\n> 'char' doesn't enforce any alignment, but you're storing a BlocktableEntry in\n> a char[]. You can't just do that.  Look at how we do that for\n> e.g. 
PGAlignedblock.\n>\n>\n> With the attached minimal fix, the tests pass again.\n\nThanks, will push this shortly!Buildfarm animal mylodon looks unhappy with this:FAILED: src/backend/postgres_lib.a.p/access_common_tidstore.c.o \nccache clang-14 -Isrc/backend/postgres_lib.a.p -Isrc/include -I../pgsql/src/include -I/usr/include/libxml2 -I/usr/include/security -fdiagnostics-color=never -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch -O2 -g -fno-strict-aliasing -fwrapv -D_GNU_SOURCE -Wmissing-prototypes -Wpointer-arith -Werror=vla -Werror=unguarded-availability-new -Wendif-labels -Wmissing-format-attribute -Wcast-function-type -Wformat-security -Wdeclaration-after-statement -Wno-unused-command-line-argument -Wno-compound-token-split-by-macro -O1 -ggdb -g3 -fno-omit-frame-pointer -Wall -Wextra -Wno-unused-parameter -Wno-sign-compare -Wno-missing-field-initializers -Wno-array-bounds -std=c99 -Wc11-extensions -Werror=c11-extensions -fPIC -isystem /usr/include/mit-krb5 -pthread -DBUILDING_DLL -MD -MQ src/backend/postgres_lib.a.p/access_common_tidstore.c.o -MF src/backend/postgres_lib.a.p/access_common_tidstore.c.o.d -o src/backend/postgres_lib.a.p/access_common_tidstore.c.o -c ../pgsql/src/backend/access/common/tidstore.c\n../pgsql/src/backend/access/common/tidstore.c:48:3: error: anonymous structs are a C11 extension [-Werror,-Wc11-extensions]\n struct\n ^ 1 error generated.Regards,Pavel BorisovSupabase", "msg_date": "Mon, 8 Apr 2024 16:22:28 +0400", "msg_from": "Pavel Borisov <pashkin.elfe@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Sun, Apr 7, 2024 at 9:08 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> I've attached a mostly-polished update on runtime embeddable values,\n> storing up to 3 offsets in the child pointer (1 on 32-bit platforms).\n> As discussed, this includes a macro to cap max possible offset that\n> can be stored in the bitmap, which I believe only reduces the valid\n> 
offset range for 32kB pages on 32-bit platforms. Even there, it allows\n> for more line pointers than can possibly be useful. It also splits\n> into two parts for readability. It would be committed in two pieces as\n> well, since they are independently useful.\n\nI pushed both of these and see that mylodon complains that anonymous\nunions are a C11 feature. I'm not actually sure that the union with\nuintptr_t is actually needed, though, since that's not accessed as\nsuch here. The simplest thing seems to get rid if the union and name\nthe inner struct \"header\", as in the attached.", "msg_date": "Mon, 8 Apr 2024 19:26:40 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, 8 Apr 2024 at 16:27, John Naylor <johncnaylorls@gmail.com> wrote:\n\n> On Sun, Apr 7, 2024 at 9:08 AM John Naylor <johncnaylorls@gmail.com>\n> wrote:\n> >\n> > I've attached a mostly-polished update on runtime embeddable values,\n> > storing up to 3 offsets in the child pointer (1 on 32-bit platforms).\n> > As discussed, this includes a macro to cap max possible offset that\n> > can be stored in the bitmap, which I believe only reduces the valid\n> > offset range for 32kB pages on 32-bit platforms. Even there, it allows\n> > for more line pointers than can possibly be useful. It also splits\n> > into two parts for readability. It would be committed in two pieces as\n> > well, since they are independently useful.\n>\n> I pushed both of these and see that mylodon complains that anonymous\n> unions are a C11 feature. I'm not actually sure that the union with\n> uintptr_t is actually needed, though, since that's not accessed as\n> such here. 
The simplest thing seems to get rid if the union and name\n> the inner struct \"header\", as in the attached.\n>\n\nProvided uintptr_t is not accessed it might be good to get rid of it.\n\nMaybe this patch also need correction in this:\n+#define NUM_FULL_OFFSETS ((sizeof(uintptr_t) - sizeof(uint8) -\nsizeof(int8)) / sizeof(OffsetNumber))\n\nRegards,\nPavel\n\nOn Mon, 8 Apr 2024 at 16:27, John Naylor <johncnaylorls@gmail.com> wrote:On Sun, Apr 7, 2024 at 9:08 AM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> I've attached a mostly-polished update on runtime embeddable values,\n> storing up to 3 offsets in the child pointer (1 on 32-bit platforms).\n> As discussed, this includes a macro to cap max possible offset that\n> can be stored in the bitmap, which I believe only reduces the valid\n> offset range for 32kB pages on 32-bit platforms. Even there, it allows\n> for more line pointers than can possibly be useful. It also splits\n> into two parts for readability. It would be committed in two pieces as\n> well, since they are independently useful.\n\nI pushed both of these and see that mylodon complains that anonymous\nunions are a C11 feature. I'm not actually sure that the union with\nuintptr_t is actually needed, though, since that's not accessed as\nsuch here. 
The simplest thing seems to get rid if the union and name\nthe inner struct \"header\", as in the attached.Provided  uintptr_t is not accessed it might be good to get rid of it.Maybe this patch also need correction in this:+#define NUM_FULL_OFFSETS ((sizeof(uintptr_t) - sizeof(uint8) - sizeof(int8)) / sizeof(OffsetNumber))Regards,Pavel", "msg_date": "Mon, 8 Apr 2024 16:42:01 +0400", "msg_from": "Pavel Borisov <pashkin.elfe@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Apr 8, 2024 at 7:42 PM Pavel Borisov <pashkin.elfe@gmail.com> wrote:\n>\n>> I pushed both of these and see that mylodon complains that anonymous\n>> unions are a C11 feature. I'm not actually sure that the union with\n>> uintptr_t is actually needed, though, since that's not accessed as\n>> such here. The simplest thing seems to get rid if the union and name\n>> the inner struct \"header\", as in the attached.\n>\n>\n> Provided uintptr_t is not accessed it might be good to get rid of it.\n>\n> Maybe this patch also need correction in this:\n> +#define NUM_FULL_OFFSETS ((sizeof(uintptr_t) - sizeof(uint8) - sizeof(int8)) / sizeof(OffsetNumber))\n\nFor full context the diff was\n\n-#define NUM_FULL_OFFSETS ((sizeof(bitmapword) - sizeof(uint16)) /\nsizeof(OffsetNumber))\n+#define NUM_FULL_OFFSETS ((sizeof(uintptr_t) - sizeof(uint8) -\nsizeof(int8)) / sizeof(OffsetNumber))\n\nI wanted the former, from f35bd9bf35 , to be independently useful (in\ncase the commit in question had some unresolvable issue), and its\nintent is to fill struct padding when the array of bitmapword happens\nto have length zero. Changing to uintptr_t for the size calculation\nreflects the intent to fit in a (local) pointer, regardless of the\nsize of a bitmapword. 
(If a DSA pointer happens to be a different size\nfor some odd platform, it should still work, BTW.)\n\nMy thinking with the union was, for big-endian, to force the 'flags'\nmember to where it can be set, but thinking again, it should still\nwork if by happenstance the header was smaller than the child pointer:\nA different bit would get tagged, but I believe that's irrelevant. The\n'flags' member makes sure a byte is reserved for the tag, but it may\nnot be where the tag is actually located, if that makes sense.\n\n\n", "msg_date": "Mon, 8 Apr 2024 20:17:54 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Apr 8, 2024 at 7:26 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> I pushed both of these and see that mylodon complains that anonymous\n> unions are a C11 feature. I'm not actually sure that the union with\n> uintptr_t is actually needed, though, since that's not accessed as\n> such here. 
The simplest thing seems to get rid if the union and name\n> the inner struct \"header\", as in the attached.\n\nI pushed this with some comment adjustments.\n\n\n", "msg_date": "Tue, 9 Apr 2024 16:22:34 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "I took a look at the coverage report from [1] and it seems pretty\ngood, but there are a couple more tests we could do.\n\n- RT_KEY_GET_SHIFT is not covered for key=0:\n\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L803\n\nThat should be fairly simple to add to the tests.\n\n- Some paths for single-value leaves are not covered:\n\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L904\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L954\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2606\n\nHowever, these paths do get regression test coverage on 32-bit\nmachines. 64-bit builds only have leaves in the TID store, which\ndoesn't (currently) delete entries, and doesn't instantiate the tree\nwith the debug option.\n\n- In RT_SET \"if (found)\" is not covered:\n\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L1768\n\nThat's because we don't yet have code that replaces an existing value\nwith a value of a different length.\n\n- RT_FREE_RECURSE isn't well covered:\n\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L1768\n\nThe TID store test is pretty simple as far as distribution of block\nkeys, and focuses more on the offset bitmaps. We could try to cover\nall branches here, but it would make the test less readable, and it's\nkind of the wrong place to do that anyway. 
test_radixtree.c does have\na commented-out option to use shared memory, but that's for local\ntesting and won't be reflected in the coverage report. Maybe it's\nenough.\n\n- RT_DELETE: \"if (key > tree->ctl->max_val)\" is not covered:\n\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2644\n\nThat should be easy to add.\n\n- RT_DUMP_NODE is not covered, and never called by default anyway:\n\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2804\n\nIt seems we could just leave it alone since it's debug-only, but it's\nalso a lot of lines. One idea is to use elog with DEBUG5 instead of\ncommenting out the call sites, but that would cause a lot of noise.\n\n- TidStoreCreate* has some memory clamps that are not covered:\n\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/backend/access/common/tidstore.c.gcov.html#L179\nhttps://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/backend/access/common/tidstore.c.gcov.html#L234\n\nMaybe we could experiment with using 1MB for shared, and something\nsmaller for local.\n\n[1] https://www.postgresql.org/message-id/20240414223305.m3i5eju6zylabvln%40awork3.anarazel.de\n\n\n", "msg_date": "Mon, 15 Apr 2024 16:12:38 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Apr 15, 2024 at 04:12:38PM +0700, John Naylor wrote:\n> - Some paths for single-value leaves are not covered:\n> \n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L904\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L954\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2606\n> \n> However, these paths do get regression test coverage on 32-bit\n> machines. 
64-bit builds only have leaves in the TID store, which\n> doesn't (currently) delete entries, and doesn't instantiate the tree\n> with the debug option.\n> \n> - In RT_SET \"if (found)\" is not covered:\n> \n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L1768\n> \n> That's because we don't yet have code that replaces an existing value\n> with a value of a different length.\n\nI saw a SIGSEGV there when using tidstore to write a fix for something else.\nPatch attached.", "msg_date": "Wed, 24 Apr 2024 14:03:19 -0700", "msg_from": "Noah Misch <noah@leadboat.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Mon, Apr 15, 2024 at 6:12 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> I took a look at the coverage report from [1] and it seems pretty\n> good, but there are a couple more tests we could do.\n\nThank you for checking!\n\n>\n> - RT_KEY_GET_SHIFT is not covered for key=0:\n>\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L803\n>\n> That should be fairly simple to add to the tests.\n\nThere are two paths to call RT_KEY_GET_SHIFT():\n\n1. RT_SET() -> RT_KEY_GET_SHIFT()\n2. RT_SET() -> RT_EXTEND_UP() -> RT_KEY_GET_SHIFT()\n\nIn both cases, it's called when key > tree->ctl->max_val. Since the\nminimum value of max_val is 255, RT_KEY_GET_SHIFT() is never called\nwhen key=0.\n\n>\n> - Some paths for single-value leaves are not covered:\n>\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L904\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L954\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2606\n>\n> However, these paths do get regression test coverage on 32-bit\n> machines. 
64-bit builds only have leaves in the TID store, which\n> doesn't (currently) delete entries, and doesn't instantiate the tree\n> with the debug option.\n\nRight.\n\n>\n> - In RT_SET \"if (found)\" is not covered:\n>\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L1768\n>\n> That's because we don't yet have code that replaces an existing value\n> with a value of a different length.\n\nNoah reported an issue around that. We should incorporate the patch\nand cover this code path.\n\n>\n> - RT_FREE_RECURSE isn't well covered:\n>\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L1768\n>\n> The TID store test is pretty simple as far as distribution of block\n> keys, and focuses more on the offset bitmaps. We could try to cover\n> all branches here, but it would make the test less readable, and it's\n> kind of the wrong place to do that anyway. test_radixtree.c does have\n> a commented-out option to use shared memory, but that's for local\n> testing and won't be reflected in the coverage report. Maybe it's\n> enough.\n\nAgreed.\n\n>\n> - RT_DELETE: \"if (key > tree->ctl->max_val)\" is not covered:\n>\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2644\n>\n> That should be easy to add.\n\nAgreed. The patch is attached.\n\n>\n> - RT_DUMP_NODE is not covered, and never called by default anyway:\n>\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2804\n>\n> It seems we could just leave it alone since it's debug-only, but it's\n> also a lot of lines. 
One idea is to use elog with DEBUG5 instead of\n> commenting out the call sites, but that would cause a lot of noise.\n\nI think we can leave it alone.\n\n>\n> - TidStoreCreate* has some memory clamps that are not covered:\n>\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/backend/access/common/tidstore.c.gcov.html#L179\n> https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/backend/access/common/tidstore.c.gcov.html#L234\n>\n> Maybe we could experiment with using 1MB for shared, and something\n> smaller for local.\n\nI've confirmed that the local and shared tidstore with small max sizes\nsuch as 4kB and 1MB worked. Currently the max size is hard-coded in\ntest_tidstore.c but if we use work_mem as the max size, we can pass\ndifferent max sizes for local and shared in the test script.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 25 Apr 2024 10:36:02 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Apr 25, 2024 at 6:03 AM Noah Misch <noah@leadboat.com> wrote:\n>\n> On Mon, Apr 15, 2024 at 04:12:38PM +0700, John Naylor wrote:\n> > - Some paths for single-value leaves are not covered:\n> >\n> > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L904\n> > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L954\n> > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2606\n> >\n> > However, these paths do get regression test coverage on 32-bit\n> > machines. 
64-bit builds only have leaves in the TID store, which\n> > doesn't (currently) delete entries, and doesn't instantiate the tree\n> > with the debug option.\n> >\n> > - In RT_SET \"if (found)\" is not covered:\n> >\n> > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L1768\n> >\n> > That's because we don't yet have code that replaces an existing value\n> > with a value of a different length.\n>\n> I saw a SIGSEGV there when using tidstore to write a fix for something else.\n> Patch attached.\n\nGreat find, thank you for the patch!\n\nThe fix looks good to me. I think we can improve regression tests for\nbetter coverage. In TidStore on a 64-bit machine, we can store 3\noffsets in the header and these values are embedded to the leaf page.\nWith more than 3 offsets, the value size becomes more than 16 bytes\nand a single value leaf. Therefore, if we can add the test with the\narray[1,2,3,4,100], we can cover the case of replacing a single-value\nleaf with a different size new single-value leaf. Now we add 9 pairs\nof do_gset_block_offset() and check_set_block_offsets(). If these are\nannoying, we can remove the cases of array[1] and array[1,2].\n\nI've attached a new patch. 
In addition to the new test case I\nmentioned, I've added some new comments and removed an unnecessary\nadded line in test_tidstore.sql.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 25 Apr 2024 11:49:38 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Apr 25, 2024 at 9:50 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> > I saw a SIGSEGV there when using tidstore to write a fix for something else.\n> > Patch attached.\n>\n> Great find, thank you for the patch!\n\n+1\n\n(This occurred to me a few days ago, but I was far from my computer.)\n\nWith the purge function that Noah proposed, I believe we can also get\nrid of the comment at the top of the .sql test file warning of a\nmaintenance hazard:\n...\"To avoid adding duplicates,\n-- each call to do_set_block_offsets() should use different block\n-- numbers.\"\n\nI found that it doesn't add any measurable time to run the test.\n\n> The fix looks good to me. I think we can improve regression tests for\n> better coverage. In TidStore on a 64-bit machine, we can store 3\n> offsets in the header and these values are embedded to the leaf page.\n> With more than 3 offsets, the value size becomes more than 16 bytes\n> and a single value leaf. Therefore, if we can add the test with the\n> array[1,2,3,4,100], we can cover the case of replacing a single-value\n> leaf with a different size new single-value leaf. Now we add 9 pairs\n\nGood idea.\n\n> of do_gset_block_offset() and check_set_block_offsets(). 
If these are\n> annoying, we can remove the cases of array[1] and array[1,2].\n\nLet's keep those -- 32-bit platforms should also exercise this path.\n\n\n", "msg_date": "Thu, 25 Apr 2024 10:17:06 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Apr 25, 2024 at 12:17 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Apr 25, 2024 at 9:50 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > > I saw a SIGSEGV there when using tidstore to write a fix for something else.\n> > > Patch attached.\n> >\n> > Great find, thank you for the patch!\n>\n> +1\n>\n> (This occurred to me a few days ago, but I was far from my computer.)\n>\n> With the purge function that Noah proposed, I believe we can also get\n> rid of the comment at the top of the .sql test file warning of a\n> maintenance hazard:\n> ...\"To avoid adding duplicates,\n> -- each call to do_set_block_offsets() should use different block\n> -- numbers.\"\n\nGood point. Removed.\n\n>\n> > of do_gset_block_offset() and check_set_block_offsets(). If these are\n> > annoying, we can remove the cases of array[1] and array[1,2].\n>\n> Let's keep those -- 32-bit platforms should also exercise this path.\n\nAgreed.\n\nI've attached a new patch. 
I'll push it tonight, if there is no further comment.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Thu, 25 Apr 2024 13:38:28 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Apr 25, 2024 at 1:38 PM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Thu, Apr 25, 2024 at 12:17 PM John Naylor <johncnaylorls@gmail.com> wrote:\n> >\n> > On Thu, Apr 25, 2024 at 9:50 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> > >\n> > > > I saw a SIGSEGV there when using tidstore to write a fix for something else.\n> > > > Patch attached.\n> > >\n> > > Great find, thank you for the patch!\n> >\n> > +1\n> >\n> > (This occurred to me a few days ago, but I was far from my computer.)\n> >\n> > With the purge function that Noah proposed, I believe we can also get\n> > rid of the comment at the top of the .sql test file warning of a\n> > maintenance hazard:\n> > ...\"To avoid adding duplicates,\n> > -- each call to do_set_block_offsets() should use different block\n> > -- numbers.\"\n>\n> Good point. Removed.\n>\n> >\n> > > of do_gset_block_offset() and check_set_block_offsets(). If these are\n> > > annoying, we can remove the cases of array[1] and array[1,2].\n> >\n> > Let's keep those -- 32-bit platforms should also exercise this path.\n>\n> Agreed.\n>\n> I've attached a new patch. 
I'll push it tonight, if there is no further comment.\n>\n\nPushed.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com\n\n\n", "msg_date": "Thu, 25 Apr 2024 22:42:49 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Thu, Apr 25, 2024 at 8:36 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n>\n> On Mon, Apr 15, 2024 at 6:12 PM John Naylor <johncnaylorls@gmail.com> wrote:\n\n> > - RT_KEY_GET_SHIFT is not covered for key=0:\n> >\n> > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L803\n> >\n> > That should be fairly simple to add to the tests.\n>\n> There are two paths to call RT_KEY_GET_SHIFT():\n>\n> 1. RT_SET() -> RT_KEY_GET_SHIFT()\n> 2. RT_SET() -> RT_EXTEND_UP() -> RT_KEY_GET_SHIFT()\n>\n> In both cases, it's called when key > tree->ctl->max_val. Since the\n> minimum value of max_val is 255, RT_KEY_GET_SHIFT() is never called\n> when key=0.\n\nAh, right, so it is dead code. Nothing to worry about, but it does\npoint the way to some simplifications, which I've put together in the\nattached.\n\n> > - RT_DELETE: \"if (key > tree->ctl->max_val)\" is not covered:\n> >\n> > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2644\n> >\n> > That should be easy to add.\n>\n> Agreed. The patch is attached.\n\nLGTM\n\n> > - TidStoreCreate* has some memory clamps that are not covered:\n> >\n> > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/backend/access/common/tidstore.c.gcov.html#L179\n> > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/backend/access/common/tidstore.c.gcov.html#L234\n> >\n> > Maybe we could experiment with using 1MB for shared, and something\n> > smaller for local.\n>\n> I've confirmed that the local and shared tidstore with small max sizes\n> such as 4kB and 1MB worked. 
Currently the max size is hard-coded in\n> test_tidstore.c but if we use work_mem as the max size, we can pass\n> different max sizes for local and shared in the test script.\n\nSeems okay, do you want to try that and see how it looks?", "msg_date": "Wed, 1 May 2024 14:29:46 +0700", "msg_from": "John Naylor <johncnaylorls@gmail.com>", "msg_from_op": false, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" }, { "msg_contents": "On Wed, May 1, 2024 at 4:29 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> On Thu, Apr 25, 2024 at 8:36 AM Masahiko Sawada <sawada.mshk@gmail.com> wrote:\n> >\n> > On Mon, Apr 15, 2024 at 6:12 PM John Naylor <johncnaylorls@gmail.com> wrote:\n>\n> > > - RT_KEY_GET_SHIFT is not covered for key=0:\n> > >\n> > > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L803\n> > >\n> > > That should be fairly simple to add to the tests.\n> >\n> > There are two paths to call RT_KEY_GET_SHIFT():\n> >\n> > 1. RT_SET() -> RT_KEY_GET_SHIFT()\n> > 2. RT_SET() -> RT_EXTEND_UP() -> RT_KEY_GET_SHIFT()\n> >\n> > In both cases, it's called when key > tree->ctl->max_val. Since the\n> > minimum value of max_val is 255, RT_KEY_GET_SHIFT() is never called\n> > when key=0.\n>\n> Ah, right, so it is dead code. Nothing to worry about, but it does\n> point the way to some simplifications, which I've put together in the\n> attached.\n\nThank you for the patch. It looks good to me.\n\n+ /* compute the smallest shift that will allowing storing the key */\n+ start_shift = pg_leftmost_one_pos64(key) / RT_SPAN * RT_SPAN;\n\nThe comment is moved from RT_KEY_GET_SHIFT() but I think s/will\nallowing storing/will allow storing/.\n\n>\n> > > - RT_DELETE: \"if (key > tree->ctl->max_val)\" is not covered:\n> > >\n> > > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/include/lib/radixtree.h.gcov.html#L2644\n> > >\n> > > That should be easy to add.\n> >\n> > Agreed. 
The patch is attached.\n>\n> LGTM\n>\n> > > - TidStoreCreate* has some memory clamps that are not covered:\n> > >\n> > > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/backend/access/common/tidstore.c.gcov.html#L179\n> > > https://anarazel.de/postgres/cov/16-vs-HEAD-2024-04-14/src/backend/access/common/tidstore.c.gcov.html#L234\n> > >\n> > > Maybe we could experiment with using 1MB for shared, and something\n> > > smaller for local.\n> >\n> > I've confirmed that the local and shared tidstore with small max sizes\n> > such as 4kB and 1MB worked. Currently the max size is hard-coded in\n> > test_tidstore.c but if we use work_mem as the max size, we can pass\n> > different max sizes for local and shared in the test script.\n>\n> Seems okay, do you want to try that and see how it looks?\n\nI've attached a simple patch for this. In test_tidstore.sql, we used\nto create two local tidstore and one shared tidstore. I thought of\nspecifying small work_mem values for these three cases but it would\nremove the normal test cases. So I created separate tidstore for this\ntest. Also, the new test is just to check if tidstore can be created\nwith such a small size, but it might be a good idea to add some TIDs\nto check if it really works fine.\n\nRegards,\n\n-- \nMasahiko Sawada\nAmazon Web Services: https://aws.amazon.com", "msg_date": "Wed, 8 May 2024 11:53:34 +0900", "msg_from": "Masahiko Sawada <sawada.mshk@gmail.com>", "msg_from_op": true, "msg_subject": "Re: [PoC] Improve dead tuple storage for lazy vacuum" } ]
[ { "msg_contents": ">You can check the more details in the attached patch. Any feedback is\nwelcome.\n\nI have tiny comments about your patch:\n\n1. name of file is uniquekey.c?\n\n+ * pathkeys.c\n+ * Utilities for maintaining uniquekey.\n\n2. Variable \"PathKey *pathkey\" at function: add_uniquekey_for_uniqueindex,\ncan have scope reduced.\n\n+ indexpr_item = list_head(unique_index->indexprs);\n+ for (c = 0; c < unique_index->nkeycolumns; c++)\n+ {\n+ PathKey *pathkey;\n\n3. Variable int c = 0, has a redundant initialization at function:\nadd_uniquekey_for_uniqueindex.\n\n4. Has one word with misspelled?\n\n\"/* We can't *guarantee* an FuncExpr will not return NULLs */\"\n\n4. Variable int i = -1, has a redudant initialization at function:\nuniquekey_contains_in\n\n5. __attribute__ ((unused)) at function: build_composited_uniquekey, is\nincompatible with msvc.\n\n6. Postgres uses a newline after variables declarations.\n\nregards,\n\nRanier Vilela\n\n\n>You can check the more details in the attached patch. Any feedback is welcome.I have tiny comments about your patch:1. name of file is uniquekey.c?+ * pathkeys.c+ *\t  Utilities for maintaining uniquekey.2. Variable \"PathKey\t*pathkey\" at function: add_uniquekey_for_uniqueindex, can have scope reduced.+\tindexpr_item = list_head(unique_index->indexprs);+\tfor (c = 0; c < unique_index->nkeycolumns; c++)+\t{+\t\tPathKey\t\t*pathkey;3. Variable int c = 0, has a redundant initialization at function:\nadd_uniquekey_for_uniqueindex.4. Has one word with misspelled? \"/* We can't *guarantee* an FuncExpr will not return NULLs */\"4. Variable int i = -1, has a redudant initialization at function: uniquekey_contains_in5. __attribute__ ((unused)) at function: build_composited_uniquekey, is incompatible with msvc.6. 
Postgres uses a newline after variables declarations.regards,Ranier Vilela", "msg_date": "Wed, 7 Jul 2021 10:24:18 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": true, "msg_subject": "Re: Keep notnullattrs in RelOptInfo (Was part of UniqueKey patch\n series)" } ]
[ { "msg_contents": "Hi, hackers\n\nThe documentation [1] says:\n\nWhen dropping a subscription that is associated with a replication slot on the\nremote host (the normal state), DROP SUBSCRIPTION will connect to the remote\nhost and try to drop the replication slot as part of its operation. This is\nnecessary so that the resources allocated for the subscription on the remote\nhost are released. If this fails, either because the remote host is not\nreachable or because the remote replication slot cannot be dropped or does not\nexist or never existed, the DROP SUBSCRIPTION command will fail. To proceed in\nthis situation, disassociate the subscription from the replication slot by\nexecuting ALTER SUBSCRIPTION ... SET (slot_name = NONE).\n\nHowever, when I try this, it complains the subscription is enabled, this command\nrequires the subscription disabled. Why we need this limitation?\n\nIn src/backend/commands/subscriptioncmds.c:\n\n if (IsSet(opts.specified_opts, SUBOPT_SLOT_NAME))\n {\n if (sub->enabled && !opts.slot_name)\n ereport(ERROR,\n (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),\n errmsg(\"cannot set %s for enabled subscription\",\n \"slot_name = NONE\")));\n\n if (opts.slot_name)\n values[Anum_pg_subscription_subslotname - 1] =\n DirectFunctionCall1(namein, CStringGetDatum(opts.slot_name));\n else\n nulls[Anum_pg_subscription_subslotname - 1] = true;\n replaces[Anum_pg_subscription_subslotname - 1] = true;\n }\n\n\nOTOH, when I execute ALTER SUBSCRIPTION ... SET (slot_name=''), it doesn't complain. However,\nSELECT select pg_create_logical_replication_slot('', 'pgoutput') complains slot name is too\nshort. Although, the slot will be created at publisher, and validate the slot name, IMO, we\ncan also validate the slot_name in parse_subscription_options() to get the error early.\nAttached fixes it. 
Any thoughts?\n\n[1] https://www.postgresql.org/docs/current/sql-dropsubscription.html\n\n\n-- \nRegrads,\nJapin Li.\nChengDu WenWu Information Technology Co.,Ltd.", "msg_date": "Wed, 07 Jul 2021 21:54:45 +0800", "msg_from": "Japin Li <japinli@hotmail.com>", "msg_from_op": true, "msg_subject": "Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" }, { "msg_contents": "On Wed, Jul 7, 2021 at 7:25 PM Japin Li <japinli@hotmail.com> wrote:\n>\n> Hi, hackers\n>\n> The documentation [1] says:\n>\n> When dropping a subscription that is associated with a replication slot on the\n> remote host (the normal state), DROP SUBSCRIPTION will connect to the remote\n> host and try to drop the replication slot as part of its operation. This is\n> necessary so that the resources allocated for the subscription on the remote\n> host are released. If this fails, either because the remote host is not\n> reachable or because the remote replication slot cannot be dropped or does not\n> exist or never existed, the DROP SUBSCRIPTION command will fail. To proceed in\n> this situation, disassociate the subscription from the replication slot by\n> executing ALTER SUBSCRIPTION ... SET (slot_name = NONE).\n>\n> However, when I try this, it complains the subscription is enabled, this command\n> requires the subscription disabled. 
Why we need this limitation?\n>\n\nIf we don't have this limitation then even after you have set the slot\nname to none, the background apply worker corresponding to that\nsubscription will continue to stream changes via the previous slot.\n\n> In src/backend/commands/subscriptioncmds.c:\n>\n> if (IsSet(opts.specified_opts, SUBOPT_SLOT_NAME))\n> {\n> if (sub->enabled && !opts.slot_name)\n> ereport(ERROR,\n> (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),\n> errmsg(\"cannot set %s for enabled subscription\",\n> \"slot_name = NONE\")));\n>\n> if (opts.slot_name)\n> values[Anum_pg_subscription_subslotname - 1] =\n> DirectFunctionCall1(namein, CStringGetDatum(opts.slot_name));\n> else\n> nulls[Anum_pg_subscription_subslotname - 1] = true;\n> replaces[Anum_pg_subscription_subslotname - 1] = true;\n> }\n>\n>\n> OTOH, when I execute ALTER SUBSCRIPTION ... SET (slot_name=''), it doesn't complain. However,\n> SELECT select pg_create_logical_replication_slot('', 'pgoutput') complains slot name is too\n> short. Although, the slot will be created at publisher, and validate the slot name, IMO, we\n> can also validate the slot_name in parse_subscription_options() to get the error early.\n> Attached fixes it. Any thoughts?\n>\n\nOh, I think this should be fixed. Can anyone else think this to be\nvalid behavior?\n\nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 8 Jul 2021 15:21:14 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" 
}, { "msg_contents": "\nOn Thu, 08 Jul 2021 at 17:51, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> On Wed, Jul 7, 2021 at 7:25 PM Japin Li <japinli@hotmail.com> wrote:\n>>\n>> Hi, hackers\n>>\n>> The documentation [1] says:\n>>\n>> When dropping a subscription that is associated with a replication slot on the\n>> remote host (the normal state), DROP SUBSCRIPTION will connect to the remote\n>> host and try to drop the replication slot as part of its operation. This is\n>> necessary so that the resources allocated for the subscription on the remote\n>> host are released. If this fails, either because the remote host is not\n>> reachable or because the remote replication slot cannot be dropped or does not\n>> exist or never existed, the DROP SUBSCRIPTION command will fail. To proceed in\n>> this situation, disassociate the subscription from the replication slot by\n>> executing ALTER SUBSCRIPTION ... SET (slot_name = NONE).\n>>\n>> However, when I try this, it complains the subscription is enabled, this command\n>> requires the subscription disabled. Why we need this limitation?\n>>\n>\n> If we don't have this limitation then even after you have set the slot\n> name to none, the background apply worker corresponding to that\n> subscription will continue to stream changes via the previous slot.\n>\n\nYeah, thanks for your explain! Should we add some comments here?\n\n>> OTOH, when I execute ALTER SUBSCRIPTION ... SET (slot_name=''), it doesn't complain. However,\n>> SELECT select pg_create_logical_replication_slot('', 'pgoutput') complains slot name is too\n>> short. Although, the slot will be created at publisher, and validate the slot name, IMO, we\n>> can also validate the slot_name in parse_subscription_options() to get the error early.\n>> Attached fixes it. Any thoughts?\n>>\n>\n> Oh, I think this should be fixed. 
Can anyone else think this to be\n> valid behavior?\n>\n\nRanier Vilela provides a v2 patch and reduce the overhead of\nReplicationSlotValidateName() in thread [1].\n\n[1] https://www.postgresql.org/message-id/CAEudQAqLtNJ1wvMKLK8ZH27SGJW5OjizgyMq28bFj-_5QG1G+A@mail.gmail.com\n\n-- \nRegrads,\nJapin Li.\nChengDu WenWu Information Technology Co.,Ltd.\n\n\n", "msg_date": "Thu, 08 Jul 2021 18:13:24 +0800", "msg_from": "Japin Li <japinli@hotmail.com>", "msg_from_op": true, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" }, { "msg_contents": "On Thu, Jul 8, 2021 at 3:43 PM Japin Li <japinli@hotmail.com> wrote:\n>\n> On Thu, 08 Jul 2021 at 17:51, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > On Wed, Jul 7, 2021 at 7:25 PM Japin Li <japinli@hotmail.com> wrote:\n> >>\n> >> Hi, hackers\n> >>\n> >> The documentation [1] says:\n> >>\n> >> When dropping a subscription that is associated with a replication slot on the\n> >> remote host (the normal state), DROP SUBSCRIPTION will connect to the remote\n> >> host and try to drop the replication slot as part of its operation. This is\n> >> necessary so that the resources allocated for the subscription on the remote\n> >> host are released. If this fails, either because the remote host is not\n> >> reachable or because the remote replication slot cannot be dropped or does not\n> >> exist or never existed, the DROP SUBSCRIPTION command will fail. To proceed in\n> >> this situation, disassociate the subscription from the replication slot by\n> >> executing ALTER SUBSCRIPTION ... SET (slot_name = NONE).\n> >>\n> >> However, when I try this, it complains the subscription is enabled, this command\n> >> requires the subscription disabled. 
Why we need this limitation?\n> >>\n> >\n> > If we don't have this limitation then even after you have set the slot\n> > name to none, the background apply worker corresponding to that\n> > subscription will continue to stream changes via the previous slot.\n> >\n>\n> Yeah, thanks for your explain! Should we add some comments here?\n>\n\nSure, but let's keep that as a separate HEAD-only patch.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Thu, 8 Jul 2021 15:47:43 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" }, { "msg_contents": "On Thu, 08 Jul 2021 at 18:17, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> On Thu, Jul 8, 2021 at 3:43 PM Japin Li <japinli@hotmail.com> wrote:\n>>\n>> On Thu, 08 Jul 2021 at 17:51, Amit Kapila <amit.kapila16@gmail.com> wrote:\n>> > On Wed, Jul 7, 2021 at 7:25 PM Japin Li <japinli@hotmail.com> wrote:\n>> >>\n>> >> Hi, hackers\n>> >>\n>> >> The documentation [1] says:\n>> >>\n>> >> When dropping a subscription that is associated with a replication slot on the\n>> >> remote host (the normal state), DROP SUBSCRIPTION will connect to the remote\n>> >> host and try to drop the replication slot as part of its operation. This is\n>> >> necessary so that the resources allocated for the subscription on the remote\n>> >> host are released. If this fails, either because the remote host is not\n>> >> reachable or because the remote replication slot cannot be dropped or does not\n>> >> exist or never existed, the DROP SUBSCRIPTION command will fail. To proceed in\n>> >> this situation, disassociate the subscription from the replication slot by\n>> >> executing ALTER SUBSCRIPTION ... SET (slot_name = NONE).\n>> >>\n>> >> However, when I try this, it complains the subscription is enabled, this command\n>> >> requires the subscription disabled. 
Why we need this limitation?\n>> >>\n>> >\n>> > If we don't have this limitation then even after you have set the slot\n>> > name to none, the background apply worker corresponding to that\n>> > subscription will continue to stream changes via the previous slot.\n>> >\n>>\n>> Yeah, thanks for your explain! Should we add some comments here?\n>>\n>\n> Sure, but let's keep that as a separate HEAD-only patch.\n\nPlease consider review v3 patch. v3-0001 adds slot_name verification in\nparse_subscription_options() and comments for why we need disable subscription\nwhere set slot_name to NONE. v3-0002 comes from Ranier Vilela, it reduce the\noverhead strlen in ReplicationSlotValidateName().\n\n-- \nRegrads,\nJapin Li.\nChengDu WenWu Information Technology Co.,Ltd.", "msg_date": "Fri, 09 Jul 2021 10:49:54 +0800", "msg_from": "Japin Li <japinli@hotmail.com>", "msg_from_op": true, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" }, { "msg_contents": "Em qui., 8 de jul. de 2021 às 23:50, Japin Li <japinli@hotmail.com>\nescreveu:\n\n>\n> On Thu, 08 Jul 2021 at 18:17, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > On Thu, Jul 8, 2021 at 3:43 PM Japin Li <japinli@hotmail.com> wrote:\n> >>\n> >> On Thu, 08 Jul 2021 at 17:51, Amit Kapila <amit.kapila16@gmail.com>\n> wrote:\n> >> > On Wed, Jul 7, 2021 at 7:25 PM Japin Li <japinli@hotmail.com> wrote:\n> >> >>\n> >> >> Hi, hackers\n> >> >>\n> >> >> The documentation [1] says:\n> >> >>\n> >> >> When dropping a subscription that is associated with a replication\n> slot on the\n> >> >> remote host (the normal state), DROP SUBSCRIPTION will connect to\n> the remote\n> >> >> host and try to drop the replication slot as part of its operation.\n> This is\n> >> >> necessary so that the resources allocated for the subscription on\n> the remote\n> >> >> host are released. 
If this fails, either because the remote host is\n> not\n> >> >> reachable or because the remote replication slot cannot be dropped\n> or does not\n> >> >> exist or never existed, the DROP SUBSCRIPTION command will fail. To\n> proceed in\n> >> >> this situation, disassociate the subscription from the replication\n> slot by\n> >> >> executing ALTER SUBSCRIPTION ... SET (slot_name = NONE).\n> >> >>\n> >> >> However, when I try this, it complains the subscription is enabled,\n> this command\n> >> >> requires the subscription disabled. Why we need this limitation?\n> >> >>\n> >> >\n> >> > If we don't have this limitation then even after you have set the slot\n> >> > name to none, the background apply worker corresponding to that\n> >> > subscription will continue to stream changes via the previous slot.\n> >> >\n> >>\n> >> Yeah, thanks for your explain! Should we add some comments here?\n> >>\n> >\n> > Sure, but let's keep that as a separate HEAD-only patch.\n>\n> Please consider review v3 patch. v3-0001 adds slot_name verification in\n> parse_subscription_options() and comments for why we need disable\n> subscription\n> where set slot_name to NONE. v3-0002 comes from Ranier Vilela, it reduce\n> the\n> overhead strlen in ReplicationSlotValidateName().\n>\n+1 Seems good.\n\nregards,\nRanier Vilela\n\nEm qui., 8 de jul. 
de 2021 às 23:50, Japin Li <japinli@hotmail.com> escreveu:\nOn Thu, 08 Jul 2021 at 18:17, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> On Thu, Jul 8, 2021 at 3:43 PM Japin Li <japinli@hotmail.com> wrote:\n>>\n>> On Thu, 08 Jul 2021 at 17:51, Amit Kapila <amit.kapila16@gmail.com> wrote:\n>> > On Wed, Jul 7, 2021 at 7:25 PM Japin Li <japinli@hotmail.com> wrote:\n>> >>\n>> >> Hi, hackers\n>> >>\n>> >> The documentation [1] says:\n>> >>\n>> >> When dropping a subscription that is associated with a replication slot on the\n>> >> remote host (the normal state), DROP SUBSCRIPTION will connect to the remote\n>> >> host and try to drop the replication slot as part of its operation. This is\n>> >> necessary so that the resources allocated for the subscription on the remote\n>> >> host are released. If this fails, either because the remote host is not\n>> >> reachable or because the remote replication slot cannot be dropped or does not\n>> >> exist or never existed, the DROP SUBSCRIPTION command will fail. To proceed in\n>> >> this situation, disassociate the subscription from the replication slot by\n>> >> executing ALTER SUBSCRIPTION ... SET (slot_name = NONE).\n>> >>\n>> >> However, when I try this, it complains the subscription is enabled, this command\n>> >> requires the subscription disabled. Why we need this limitation?\n>> >>\n>> >\n>> > If we don't have this limitation then even after you have set the slot\n>> > name to none, the background apply worker corresponding to that\n>> > subscription will continue to stream changes via the previous slot.\n>> >\n>>\n>> Yeah, thanks for your explain! Should we add some comments here?\n>>\n>\n> Sure, but let's keep that as a separate HEAD-only patch.\n\nPlease consider review v3 patch. v3-0001 adds slot_name verification in\nparse_subscription_options() and comments for why we need disable subscription\nwhere set slot_name to NONE. 
v3-0002 comes from Ranier Vilela, it reduce the\noverhead strlen in ReplicationSlotValidateName().+1 Seems good.regards,Ranier Vilela", "msg_date": "Fri, 9 Jul 2021 09:03:10 -0300", "msg_from": "Ranier Vilela <ranier.vf@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" }, { "msg_contents": "On Fri, Jul 9, 2021 at 8:20 AM Japin Li <japinli@hotmail.com> wrote:\n>\n> On Thu, 08 Jul 2021 at 18:17, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > On Thu, Jul 8, 2021 at 3:43 PM Japin Li <japinli@hotmail.com> wrote:\n>\n> Please consider review v3 patch. v3-0001 adds slot_name verification in\n> parse_subscription_options() and comments for why we need disable subscription\n> where set slot_name to NONE.\n>\n\nI think we back-patch this bug-fix till v10 where it was introduced\nand update the comments only in HEAD. So, accordingly, I moved the\nchanges into two patches and changed the comments a bit. Can you\nplease test the first patch in back-branches? I'll also do it\nseparately.\n\n> v3-0002 comes from Ranier Vilela, it reduce the\n> overhead strlen in ReplicationSlotValidateName().\n>\n\nI think this patch has nothing to do with this bug-fix, so I suggest\nyou discuss this in a separate patch. Personally, I don't think it\nwill help in reducing any overhead but there doesn't appear to be any\nharm in changing it as proposed.\n\n-- \nWith Regards,\nAmit Kapila.", "msg_date": "Fri, 16 Jul 2021 11:36:14 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" 
}, { "msg_contents": "\nOn Fri, 16 Jul 2021 at 14:06, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> On Fri, Jul 9, 2021 at 8:20 AM Japin Li <japinli@hotmail.com> wrote:\n>>\n>> On Thu, 08 Jul 2021 at 18:17, Amit Kapila <amit.kapila16@gmail.com> wrote:\n>> > On Thu, Jul 8, 2021 at 3:43 PM Japin Li <japinli@hotmail.com> wrote:\n>>\n>> Please consider review v3 patch. v3-0001 adds slot_name verification in\n>> parse_subscription_options() and comments for why we need disable subscription\n>> where set slot_name to NONE.\n>>\n>\n> I think we back-patch this bug-fix till v10 where it was introduced\n> and update the comments only in HEAD. So, accordingly, I moved the\n> changes into two patches and changed the comments a bit. Can you\n> please test the first patch in back-branches? I'll also do it\n> separately.\n>\n\nThanks for your review, I'll test the in back-branches.\n\n>> v3-0002 comes from Ranier Vilela, it reduce the\n>> overhead strlen in ReplicationSlotValidateName().\n>>\n>\n> I think this patch has nothing to do with this bug-fix, so I suggest\n> you discuss this in a separate patch. Personally, I don't think it\n> will help in reducing any overhead but there doesn't appear to be any\n> harm in changing it as proposed.\n\nAgreed.\n\n-- \nRegrads,\nJapin Li.\nChengDu WenWu Information Technology Co.,Ltd.\n\n\n", "msg_date": "Fri, 16 Jul 2021 14:13:07 +0800", "msg_from": "Japin Li <japinli@hotmail.com>", "msg_from_op": true, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" }, { "msg_contents": "On Fri, 16 Jul 2021 at 14:06, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> On Fri, Jul 9, 2021 at 8:20 AM Japin Li <japinli@hotmail.com> wrote:\n>>\n>> On Thu, 08 Jul 2021 at 18:17, Amit Kapila <amit.kapila16@gmail.com> wrote:\n>> > On Thu, Jul 8, 2021 at 3:43 PM Japin Li <japinli@hotmail.com> wrote:\n>>\n>> Please consider review v3 patch. 
v3-0001 adds slot_name verification in\n>> parse_subscription_options() and comments for why we need disable subscription\n>> where set slot_name to NONE.\n>>\n>\n> I think we back-patch this bug-fix till v10 where it was introduced\n> and update the comments only in HEAD. So, accordingly, I moved the\n> changes into two patches and changed the comments a bit. Can you\n> please test the first patch in back-branches? I'll also do it\n> separately.\n>\n\nI try to back-patch to v10 stable to v14 stable, and attach two new patches:\none for PG10 & PG11 stable, and the other is for PG12 to PG14 stable.\nv4 patch can be applied on HEAD. This modify looks good to me.\n\nHow do we back-patch to back-branches? I try to use cherry-pick, but it doesn't\nalways work (without a doubt, it might be some difference between branches).\n\n>> v3-0002 comes from Ranier Vilela, it reduce the\n>> overhead strlen in ReplicationSlotValidateName().\n>>\n>\n> I think this patch has nothing to do with this bug-fix, so I suggest\n> you discuss this in a separate patch. Personally, I don't think it\n> will help in reducing any overhead but there doesn't appear to be any\n> harm in changing it as proposed.\n\nI start a new thread to discuss this [1].\n\n[1] - https://www.postgresql.org/message-id/MEYP282MB16696F6DBA8AE36A648817B2B6119@MEYP282MB1669.AUSP282.PROD.OUTLOOK.COM\n\n-- \nRegrads,\nJapin Li.\nChengDu WenWu Information Technology Co.,Ltd.", "msg_date": "Fri, 16 Jul 2021 16:42:01 +0800", "msg_from": "Japin Li <japinli@hotmail.com>", "msg_from_op": true, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" 
}, { "msg_contents": "On Fri, Jul 16, 2021 at 2:12 PM Japin Li <japinli@hotmail.com> wrote:\n>\n>\n> On Fri, 16 Jul 2021 at 14:06, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> > On Fri, Jul 9, 2021 at 8:20 AM Japin Li <japinli@hotmail.com> wrote:\n> >>\n> >> On Thu, 08 Jul 2021 at 18:17, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> >> > On Thu, Jul 8, 2021 at 3:43 PM Japin Li <japinli@hotmail.com> wrote:\n> >>\n> >> Please consider review v3 patch. v3-0001 adds slot_name verification in\n> >> parse_subscription_options() and comments for why we need disable subscription\n> >> where set slot_name to NONE.\n> >>\n> >\n> > I think we back-patch this bug-fix till v10 where it was introduced\n> > and update the comments only in HEAD. So, accordingly, I moved the\n> > changes into two patches and changed the comments a bit. Can you\n> > please test the first patch in back-branches? I'll also do it\n> > separately.\n> >\n>\n> I try to back-patch to v10 stable to v14 stable, and attach two new patches:\n> one for PG10 & PG11 stable, and the other is for PG12 to PG14 stable.\n> v4 patch can be applied on HEAD. This modify looks good to me.\n>\n\nThe patch you prepared for v14 was not getting applied cleanly, so I\ndid the required modifications and then pushed.\n\n> How do we back-patch to back-branches? I try to use cherry-pick, but it doesn't\n> always work (without a doubt, it might be some difference between branches).\n>\n\nYeah, we need to adjust the patch as per the back-branches code.\n\n-- \nWith Regards,\nAmit Kapila.\n\n\n", "msg_date": "Mon, 19 Jul 2021 14:32:10 +0530", "msg_from": "Amit Kapila <amit.kapila16@gmail.com>", "msg_from_op": false, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" 
}, { "msg_contents": "\nOn Mon, 19 Jul 2021 at 17:02, Amit Kapila <amit.kapila16@gmail.com> wrote:\n> On Fri, Jul 16, 2021 at 2:12 PM Japin Li <japinli@hotmail.com> wrote:\n>>\n>>\n>> On Fri, 16 Jul 2021 at 14:06, Amit Kapila <amit.kapila16@gmail.com> wrote:\n>> > On Fri, Jul 9, 2021 at 8:20 AM Japin Li <japinli@hotmail.com> wrote:\n>> >>\n>> >> On Thu, 08 Jul 2021 at 18:17, Amit Kapila <amit.kapila16@gmail.com> wrote:\n>> >> > On Thu, Jul 8, 2021 at 3:43 PM Japin Li <japinli@hotmail.com> wrote:\n>> >>\n>> >> Please consider review v3 patch. v3-0001 adds slot_name verification in\n>> >> parse_subscription_options() and comments for why we need disable subscription\n>> >> where set slot_name to NONE.\n>> >>\n>> >\n>> > I think we back-patch this bug-fix till v10 where it was introduced\n>> > and update the comments only in HEAD. So, accordingly, I moved the\n>> > changes into two patches and changed the comments a bit. Can you\n>> > please test the first patch in back-branches? I'll also do it\n>> > separately.\n>> >\n>>\n>> I try to back-patch to v10 stable to v14 stable, and attach two new patches:\n>> one for PG10 & PG11 stable, and the other is for PG12 to PG14 stable.\n>> v4 patch can be applied on HEAD. This modify looks good to me.\n>>\n>\n> The patch you prepared for v14 was not getting applied cleanly, so I\n> did the required modifications and then pushed.\n>\n>> How do we back-patch to back-branches? I try to use cherry-pick, but it doesn't\n>> always work (without a doubt, it might be some difference between branches).\n>>\n>\n> Yeah, we need to adjust the patch as per the back-branches code.\n\nThanks!\n\n-- \nRegrads,\nJapin Li.\nChengDu WenWu Information Technology Co.,Ltd.\n\n\n", "msg_date": "Tue, 20 Jul 2021 09:59:53 +0800", "msg_from": "Japin Li <japinli@hotmail.com>", "msg_from_op": true, "msg_subject": "Re: Why ALTER SUBSCRIPTION ... SET (slot_name='none') requires\n subscription disabled?" } ]
[ { "msg_contents": "Hi.\n\nThe following test case makes postgresql backend crash. The trigger is \nincorrect, but this didn't crash postgresql before\n\ncommit 86dc90056dfdbd9d1b891718d2e5614e3e432f35 (HEAD)\nAuthor: Tom Lane <tgl@sss.pgh.pa.us>\nDate: Wed Mar 31 11:52:34 2021 -0400\n\n Rework planning and execution of UPDATE and DELETE.\n\n\nProgram terminated with signal SIGSEGV, Segmentation fault.\n#0 0x00007f1032cc6905 in find_modifytable_subplan (root=0x563f05e61458, \nplan=0x563f06020e48, rtindex=1, subplan_index=0) at postgres_fdw.c:2373\n2373 else if (IsA(subplan, Result) && IsA(outerPlan(subplan), \nAppend))\n(gdb) bt\n#0 0x00007f1032cc6905 in find_modifytable_subplan (root=0x563f05e61458, \nplan=0x563f06020e48, rtindex=1, subplan_index=0) at postgres_fdw.c:2373\n#1 0x00007f1032cc6a44 in postgresPlanDirectModify (root=0x563f05e61458, \nplan=0x563f06020e48, resultRelation=1, subplan_index=0) at \npostgres_fdw.c:2433\n#2 0x0000563f035f2876 in make_modifytable (root=0x563f05e61458, \nsubplan=0x563f05e626e8, operation=CMD_DELETE, canSetTag=true, \nnominalRelation=1, rootRelation=0, partColsUpdated=false, \nresultRelations=0x563f06020b88, updateColnosLists=0x0, \nwithCheckOptionLists=0x0,\n returningLists=0x0, rowMarks=0x0, onconflict=0x0, epqParam=0) at \ncreateplan.c:7007\n#3 0x0000563f035e9ab3 in create_modifytable_plan (root=0x563f05e61458, \nbest_path=0x563f05e62168) at createplan.c:2746\n#4 0x0000563f035e5936 in create_plan_recurse (root=0x563f05e61458, \nbest_path=0x563f05e62168, flags=1) at createplan.c:530\n#5 0x0000563f035e54be in create_plan (root=0x563f05e61458, \nbest_path=0x563f05e62168) at createplan.c:347\n#6 0x0000563f035f8810 in standard_planner (parse=0x563f05e60c08, \nquery_string=0x563f05ffd8d0 \"DELETE FROM test_remote WHERE \nrow(i,j)=row(new.i,new.j)\", cursorOptions=2048, \nboundParams=0x563f05e980d8) at planner.c:407\n#7 0x0000563f035f84f4 in planner (parse=0x563f05e60c08, \nquery_string=0x563f05ffd8d0 \"DELETE FROM test_remote 
WHERE \nrow(i,j)=row(new.i,new.j)\", cursorOptions=2048, \nboundParams=0x563f05e980d8) at planner.c:271\n#8 0x0000563f0373f9c7 in pg_plan_query (querytree=0x563f05e60c08, \nquery_string=0x563f05ffd8d0 \"DELETE FROM test_remote WHERE \nrow(i,j)=row(new.i,new.j)\", cursorOptions=2048, \nboundParams=0x563f05e980d8) at postgres.c:847\n#9 0x0000563f0373fb15 in pg_plan_queries (querytrees=0x563f05e60bb0, \nquery_string=0x563f05ffd8d0 \"DELETE FROM test_remote WHERE \nrow(i,j)=row(new.i,new.j)\", cursorOptions=2048, \nboundParams=0x563f05e980d8) at postgres.c:939\n#10 0x0000563f038dade2 in BuildCachedPlan (plansource=0x563f06027590, \nqlist=0x563f05e60bb0, boundParams=0x563f05e980d8, queryEnv=0x0) at \nplancache.c:936\n#11 0x0000563f038db4d8 in GetCachedPlan (plansource=0x563f06027590, \nboundParams=0x563f05e980d8, owner=0x563f05f6c478, queryEnv=0x0) at \nplancache.c:1218\n#12 0x0000563f03542235 in _SPI_execute_plan (plan=0x563f060c9ac0, \nparamLI=0x563f05e980d8, snapshot=0x0, crosscheck_snapshot=0x0, \nread_only=false, allow_nonatomic=false, fire_triggers=true, tcount=0, \ncaller_dest=0x0, plan_owner=0x563f05f6c478) at spi.c:2405\n#13 0x0000563f0353ebb9 in SPI_execute_plan_with_paramlist \n(plan=0x563f060c9ac0, params=0x563f05e980d8, read_only=false, tcount=0) \nat spi.c:651\n#14 0x00007f1032c2f504 in exec_stmt_execsql (estate=0x7ffcab5c6420, \nstmt=0x563f05bfb868) at pl_exec.c:4214\n#15 0x00007f1032c2a9bc in exec_stmts (estate=0x7ffcab5c6420, \nstmts=0x563f05bfb8c0) at pl_exec.c:2059\n#16 0x00007f1032c2b854 in exec_stmt_if (estate=0x7ffcab5c6420, \nstmt=0x563f06028700) at pl_exec.c:2481\n#17 0x00007f1032c2a842 in exec_stmts (estate=0x7ffcab5c6420, \nstmts=0x563f06028758) at pl_exec.c:2003\n#18 0x00007f1032c2a579 in exec_stmt_block (estate=0x7ffcab5c6420, \nblock=0x563f060288c0) at pl_exec.c:1910\n#19 0x00007f1032c29cac in exec_toplevel_block (estate=0x7ffcab5c6420, \nblock=0x563f060288c0) at pl_exec.c:1608\n#20 0x00007f1032c28730 in plpgsql_exec_trigger 
(func=0x563f05f6d940, \ntrigdata=0x7ffcab5c6880) at pl_exec.c:1024\n#21 0x00007f1032c43319 in plpgsql_call_handler (fcinfo=0x7ffcab5c6700) \nat pl_handler.c:268\n#22 0x0000563f034a3e2a in ExecCallTriggerFunc (trigdata=0x7ffcab5c6880, \ntgindx=0, finfo=0x563f05e7aa60, instr=0x0, \nper_tuple_context=0x563f05c763d0) at trigger.c:2141\n#23 0x0000563f034a7674 in AfterTriggerExecute (estate=0x563f05e7a2b0, \nevent=0x563f05e1a580, relInfo=0x563f05e7a738, trigdesc=0x563f05e7a950, \nfinfo=0x563f05e7aa60, instr=0x0, per_tuple_context=0x563f05c763d0, \ntrig_tuple_slot1=0x0, trig_tuple_slot2=0x0) at trigger.c:4034\n#24 0x0000563f034a7b8d in afterTriggerInvokeEvents \n(events=0x563f05f3c540, firing_id=1, estate=0x563f05e7a2b0, \ndelete_ok=false) at trigger.c:4250\n#25 0x0000563f034a8353 in AfterTriggerEndQuery (estate=0x563f05e7a2b0) \nat trigger.c:4587\n#26 0x0000563f034dc8eb in standard_ExecutorFinish \n(queryDesc=0x563f060279a0) at execMain.c:436\n#27 0x0000563f034dc7c5 in ExecutorFinish (queryDesc=0x563f060279a0) at \nexecMain.c:404\n#28 0x0000563f03745edc in ProcessQuery (plan=0x563f05fc8100, \nsourceText=0x563f05ad74a0 \"DELETE FROM test WHERE i=1;\", params=0x0, \nqueryEnv=0x0, dest=0x563f05fc81f0, qc=0x7ffcab5c6cf0) at pquery.c:190\n#29 0x0000563f037478fd in PortalRunMulti (portal=0x563f05b3aa50, \nisTopLevel=true, setHoldSnapshot=false, dest=0x563f05fc81f0, \naltdest=0x563f05fc81f0, qc=0x7ffcab5c6cf0) at pquery.c:1266\n#30 0x0000563f03746e24 in PortalRun (portal=0x563f05b3aa50, \ncount=9223372036854775807, isTopLevel=true, run_once=true, \ndest=0x563f05fc81f0, altdest=0x563f05fc81f0, qc=0x7ffcab5c6cf0) at \npquery.c:786\n#31 0x0000563f03740084 in exec_simple_query (query_string=0x563f05ad74a0 \n\"DELETE FROM test WHERE i=1;\") at postgres.c:1214\n#32 0x0000563f03744c41 in PostgresMain (argc=1, argv=0x7ffcab5c6f10, \ndbname=0x563f05b02938 \"contrib_regression\", username=0x563f05b02918 \n\"leoric\") at postgres.c:4486\n#33 0x0000563f03670f3a in BackendRun 
(port=0x563f05af8f00) at \npostmaster.c:4507\n#34 0x0000563f036707f3 in BackendStartup (port=0x563f05af8f00) at \npostmaster.c:4229\n#35 0x0000563f0366c97c in ServerLoop () at postmaster.c:1745\n#36 0x0000563f0366c115 in PostmasterMain (argc=8, argv=0x563f05ad1820) \nat postmaster.c:1417\n#37 0x0000563f0356193d in main (argc=8, argv=0x563f05ad1820) at \nmain.c:209\n(gdb) print *subplan\n$2 = {type = T_Result, startup_cost = 0, total_cost = 0, plan_rows = 0, \nplan_width = 0, parallel_aware = false, parallel_safe = false, \nasync_capable = false, plan_node_id = 0, targetlist = 0x563f06020d40, \nqual = 0x0, lefttree = 0x0, righttree = 0x0, initPlan = 0x0,\n extParam = 0x0, allParam = 0x0}\n\n\n-- \nBest regards,\nAlexander Pyhalov,\nPostgres Professional", "msg_date": "Wed, 07 Jul 2021 17:06:14 +0300", "msg_from": "Alexander Pyhalov <a.pyhalov@postgrespro.ru>", "msg_from_op": true, "msg_subject": "PostgreSQL 14 backend crash on incorrect trigger" }, { "msg_contents": "Alexander Pyhalov <a.pyhalov@postgrespro.ru> writes:\n> The following test case makes postgresql backend crash. The trigger is \n> incorrect, but this didn't crash postgresql before\n\nYup, that's a silly oversight. Fixed, thanks for the report!\n\n\t\t\tregards, tom lane\n\n\n", "msg_date": "Wed, 07 Jul 2021 15:22:06 -0400", "msg_from": "Tom Lane <tgl@sss.pgh.pa.us>", "msg_from_op": false, "msg_subject": "Re: PostgreSQL 14 backend crash on incorrect trigger" } ]