On the Challenges of using Reinforcement Learning in Precision Drug Dosing: Delay and Prolongedness of Action Effects

Sumana Basu 1,2, Marc-André Legault 1,2, Adriana Romero-Soriano 1,2,3, Doina Precup 1,2
1 McGill University, 2 Mila, 3 Meta AI
sumana.basu@mail.mcgill.ca, marc-andre.legault@mcgill.ca, adriana.romsor@gmail.com, dprecup@cs.mcgill.ca

Abstract

Drug dosing is an important application of AI, which can be formulated as a Reinforcement Learning (RL) problem. In this paper, we identify two major challenges of using RL for drug dosing: the delayed and prolonged effects of administering medications, which break the Markov assumption of the RL framework. We focus on prolongedness and define PAE-POMDP (Prolonged Action Effect-Partially Observable Markov Decision Process), a subclass of POMDPs in which the Markov assumption does not hold specifically due to the prolonged effects of actions. Motivated by the pharmacology literature, we propose a simple and effective approach to converting drug-dosing PAE-POMDPs into MDPs, enabling the use of existing RL algorithms to solve such problems. We validate the proposed approach on a toy task and on a challenging glucose-control task, for which we devise a clinically inspired reward function.
Our results demonstrate that: (1) the proposed method to restore the Markov assumption leads to significant improvements over a vanilla baseline; (2) the approach is competitive with recurrent policies, which may inherently capture the prolonged effect of actions; (3) it is remarkably more time- and memory-efficient than the recurrent baseline and hence more suitable for real-time dosing control systems; and (4) it exhibits favourable qualitative behavior in our policy analysis.

1 Introduction

Drug dosing plays an important role in human health: individuals with type 1 diabetes require regular insulin injections to manage their blood glucose levels, intensive care patients require continuous monitoring and administration of drugs, optimal doses of anaesthesia are required during operative procedures, and so on. Optimal drug dosing is most important in cases where the therapeutic window is narrow, meaning that small deviations from the therapeutic range of drug concentration may lead to serious clinical complications (FDA 2015; Maxfield and Zineh 2021). These problems are compounded by idiosyncratic differences in the dynamics of drug absorption, distribution, metabolism or excretion (collectively referred to as pharmacokinetics) and in drug sensitivity. Therefore, one important goal of precision medicine is to tailor patient care while accounting for individual characteristics, and developing algorithmic solutions for drug dosing is a contribution towards that broader goal.

Figure 1: Delayed and prolonged effects of drugs. The drug is administered only at the first step, but no immediate effect is observed. After the initial delay, the initial dose keeps altering the patient's status for a while, even in the absence of additional doses. The durations of the delay and of the prolonged effect are individual-specific.

Reinforcement learning (RL) offers a framework to account for individual characteristics and automatically derive personalized treatment policies, in line with the objective of precision medicine (Ribba et al. 2020). However, RL-based algorithms cannot be applied off-the-shelf to tackle precision dosing, since all drugs are known to have a delayed and prolonged effect from the moment of administration (Holford 2018) (see Figure 1). The delay is attributed to the time it takes for the drug to distribute to the target site, bind to the receptor and finally alter physiological substances before its response can be observed; this can vary from minutes to hours or even longer (Holford 2018). The prolongedness is due to individual variation in pharmacokinetics (Vogenberg, Barash, and Pursel 2010). In this scenario of delayed and prolonged action effects, the future depends on previous drug dosages and their effects, and therefore the Markov assumption usually made by RL algorithms no longer holds. Although RL has been applied to drug dosing problems such as controlling glucose levels for a closed-loop artificial pancreas (Tejedor, Woldaregay, and Godtliebsen 2020), the violation of the Markov assumption is in this case not only problematic from an RL research perspective, but also from a safety perspective, as ignoring the delayed and prolonged effects of a drug can lead to overdosing-related toxicity (Guengerich 2011).

Contributions. In this paper, we identify prolongedness and delay as fundamental roadblocks to using RL in precision drug dosing, and focus on addressing the former.
To that end, we introduce the prolonged action effect partially observable Markov decision process (PAE-POMDP), a framework for modeling prolonged action effects in decision making. We then present assumptions inspired by the pharmacology literature to convert the non-Markovian prolonged action effect problem into a Markov decision process (MDP), thereby enabling the use of recent advances in model-free RL. To the best of our knowledge, our work is the first to explore the prolonged effect of actions. We validate the proposed approach on a toy task, where the only violation of the Markov property comes from the prolonged effect of actions, and show that restoring the Markov assumption allows the RL agent we develop to significantly outperform previous baselines. We also address the challenging task of glucose control via optimal insulin dosing for type 1 diabetes patients by leveraging the open-sourced version of the FDA-approved UVA/Padova simulator (Dalla Man et al. 2009; Xie 2018). Although the glucose control task has garnered interest in the RL literature in the past (Tejedor, Woldaregay, and Godtliebsen 2020), there does not appear to be a widely adopted reward function. Therefore, we design a clinically motivated reward function that explicitly avoids overdosing while effectively controlling glucose levels. Our results show that our approach of converting the PAE-POMDP into an MDP is not only competitive in terms of performance, but is also remarkably more time- and memory-efficient than the baselines, making it more suitable for real-time dosing control systems. With these contributions, we aim to raise awareness of both the delayed and prolonged effects of drugs when tackling personalized drug dosing with RL, and we hope that the proposed reward function will help lower the entry barrier to glucose control for a closed-loop artificial pancreas, while fostering future research in this direction.

What this paper does not do. This paper does not aim to provide a general solution to the prolonged effect of actions, but rather one crafted specifically for precision drug dosing that allows us to quickly bring the problem back into the MDP framework in a compute- and memory-efficient way. We do not provide a new algorithm to tackle prolongedness; instead, we propose a pharmacologically motivated and effective way to enable the use of existing RL algorithms for precision drug dosing. We assume drug action in isolation, rather than in combination with other drugs; learning and predicting drug synergies is a growing body of research and an interesting direction for future work.

2 Related Work

Blood glucose control in individuals with type 1 diabetes is a longstanding problem in medicine that has long stimulated the interest of RL researchers. In a systematic review of reinforcement learning approaches for blood glucose management, Tejedor, Woldaregay, and Godtliebsen (2020) reported 347 papers on the topic between 1990 and 2019, of which 11 used the UVA/Padova simulator (Dalla Man et al. 2009; Dalla Man et al. 2014; Xie 2018). While most of these works explore RL algorithms (e.g.,
Actor-Critic, Q-learning, SARSA, and DQN, among others) to address the glucose control problem, none of them acknowledges the fundamental challenge of prolongedness, and most consider the current blood glucose level a sufficient statistic for the state. Even in the cases where actions are considered part of the state information, only the most recent action is taken into account, which is not enough to restore the Markov assumption violated by prolongedness (see appendix). Fox et al. (2020) formalized the problem as a POMDP by augmenting the state with 4 hours of blood glucose and insulin information, but they did not recognize the prolongedness of drug effects as the reason for the partial observability. In fact, to handle prolongedness, the most recent blood glucose measurement and the residual active insulin are sufficient, as we show in the experiments section.

There is a similar theme of exploring various state-of-the-art deep RL algorithms for other drug dosing problems. However, these works also miss the crucial point of recognizing and handling the prolonged effect of drugs. In a non-exhaustive list: Nemati, Ghassemi, and Clifford (2016) use a Q-network to learn a personalized heparin dosing policy; Weng et al. (2017) use policy iteration to manage glycemia for septic patients in critical care; Lin et al. (2018) propose the use of Deep Deterministic Policy Gradient (DDPG; Lillicrap et al. 2016) for heparin dosing in critical care patients; and Lopez-Martinez et al. (2019) use Double Deep Q-Networks (DDQN) to administer opioids for pain management in critical care patients. None of these papers discusses the prolonged effect of drug doses. Zadeh, Street, and Thomas (2022) use Deep Q-Networks (DQN) to administer the anticoagulant warfarin, and they consider a pre-defined duration along with each dose. They use a pharmacokinetic/pharmacodynamic (PK/PD) model to determine the duration and add it to the state information, along with the patient information, a blood coagulability measure and the dosing history. However, the authors do not explicitly recognize the prolonged drug effect as one of the reasons for adding the duration information. In addition, since dose response is individual-specific, finding the right duration is a challenge in itself.

3 Background

Markov Decision Process (MDP). Reinforcement Learning (RL) is a framework for solving sequential decision-making problems in which an agent interacts with the environment and receives feedback in the form of a reward. The standard formal framework for RL is the Markov Decision Process (MDP). An MDP $M$ is a 5-tuple $(S, A, r, P, \gamma)$, where $S$ is a (finite) set of states, $A$ is a (finite) set of actions, $P$ is the state transition probability $P(s_{t+1} = s' \mid s_t = s, a_t = a)$, $r : S \times A \times S \to \mathbb{R}$ is the reward function, and $\gamma \in [0, 1)$ is the discount factor. As the name suggests, an MDP obeys the Markov assumption that the future is independent of the past given the present, which means that transitions and rewards depend only on the current state and action, and not on the past history. The goal of an RL agent is to find a policy $\pi : S \times A \to [0, 1]$ that maximizes the cumulative discounted return $\sum_{t=0}^{\infty} \gamma^t r_t$.
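As a concrete illustration of the return being maximized, the following is a minimal sketch (ours, not the paper's code; the function name and example rewards are hypothetical) of computing the discounted return of a finite reward sequence:

```python
def discounted_return(rewards, gamma=0.99):
    """Cumulative discounted return: sum over t of gamma^t * r_t."""
    g = 0.0
    # Iterate backwards so each step needs only one multiplication by gamma.
    for r in reversed(rewards):
        g = r + gamma * g
    return g

# Example: rewards [1.0, 0.0, 2.0] with gamma = 0.9
# give 1.0 + 0.9 * 0.0 + 0.81 * 2.0 = 2.62.
print(discounted_return([1.0, 0.0, 2.0], gamma=0.9))
```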
POMDP and Recurrent Policy. A Partially Observable MDP (POMDP) is a 7-tuple $(S, A, r, P, \Omega, O, \gamma)$, where $\Omega$ is the set of observations, $O$ is the set of conditional observation probabilities $O(\omega \mid s)$, also known as the emission function, and the remaining elements are the same as in an MDP. In a POMDP, the agent does not have direct access to the identity of the states and instead needs to infer them through the observations. Note that while in an MDP an agent that aims to act optimally with respect to the expected long-term return only needs to consider Markovian policies $\pi : S \times A \to [0, 1]$, in a POMDP policies need either to rely on the entire history of actions and observations, or to infer the distribution of hidden states from this history (as in belief-based POMDP solution methods). In recent work, recurrent networks have become the standard for implementing POMDP policies. We call a recurrent policy a mapping $\pi : \mathcal{T} \times A \to [0, 1]$, where $\mathcal{T}$ is the space of trajectories $\tau = \{(o_t, a_t, r_t)\}_{t=0}^{T}$ of up to $T$ time steps.

Q-Learning and Deep Q-Networks (DQN). Q-Learning (Watkins and Dayan 1992) is a model-free RL algorithm that estimates $Q : S \times A \to \mathbb{R}$, a value function assessing the quality of action $a_t \in A$ in state $s_t \in S$. The value function can be estimated as $Q(s_t, a_t) \leftarrow Q(s_t, a_t) + \alpha \left( r_t + \gamma \max_a Q(s_{t+1}, a) - Q(s_t, a_t) \right)$, where $\alpha \in (0, 1)$ is the learning rate. In DQN (Mnih et al. 2015), the Q-values are estimated by a neural network minimizing the mean squared Bellman error $L_i(\theta_i) = \mathbb{E}_{s_t, a_t \sim \pi_b} \left[ (y_i - Q(s_t, a_t; \theta_i))^2 \right]$, with $y_i = r_t + \gamma \max_{a_{t+1}} Q(s_{t+1}, a_{t+1}; \theta_{i-1})$, where $\pi_b$ is the behavior policy, $y_i$ is the target, and $\theta_i$ are the parameters of the Q-network at iteration $i$.

4 Method

4.1 Problem Formulation

Drug dosing can be formalized as a POMDP, because the effect of a medication is felt over a period of time after its administration. In this section, we introduce prolonged action effect POMDPs, referred to as PAE-POMDPs, a subclass of POMDPs in which an action's effect lasts more than one time step. More precisely, the action $a_t$ taken at time step $t$ continues to affect the future states of the environment for $\kappa \in \mathbb{Z}^+$ time steps, where $\kappa$ is the time interval necessary for $a_t$'s effect to fall below a given threshold. The value of $\kappa$ is environment-specific and also depends on the amplitude of the action $a_t$. The prolongedness defined above offers a forward view in time. From a backward-view perspective, the state at time step $t$ is the result of superimposing the effects of the actions from several preceding time steps, which are still felt at time $t$, i.e., $s_{t+1} = s_t + \sum_{k=0}^{\kappa} \Delta a_{t-k}$, where $\Delta a_t$ is the effect of action $a_t$ on the state. In other words, the Markov transition function $P(s_{t+1} \mid s_t, a_t)$ is no longer valid; instead, the transition function becomes $P(s_{t+1} \mid s_t, (a_{t-k})_{k=0}^{\kappa})$. In this case, the reward function can still be assumed to be Markov, $R(s_t, a_t, s_{t+1})$ (see Equation 2). From the perspective of the agent, this means that it needs to keep track of the history of actions over a preceding period of time. In general, this problem is no simpler than a regular POMDP, as an agent that keeps track of its history may need to remember all the actions taken since the beginning of time. However, in this work we consider a more circumscribed problem formulation that is relevant for therapeutic dosing. Specifically, we use knowledge from pharmacology to restore the Markov assumptions for drug-dosing PAE-POMDPs.
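To make the backward view concrete, the following is a minimal sketch (our illustration, not the paper's environment) of a one-dimensional PAE-POMDP transition in which each of the last $\kappa + 1$ actions contributes a residual increment to the state; the halving effect profile `delta` is a hypothetical choice:

```python
from collections import deque

def pae_step(state, recent_actions, delta):
    """One toy PAE-POMDP transition:
    s_{t+1} = s_t + sum_{k=0..kappa} delta(a_{t-k}, k),
    where recent_actions holds the last kappa+1 actions (newest first)
    and delta(a, k) is the residual effect of an action taken k steps ago."""
    return state + sum(delta(a, k) for k, a in enumerate(recent_actions))

# Hypothetical effect profile: an action's effect halves at every step.
delta = lambda a, k: a * 0.5 ** k

kappa = 3
recent_actions = deque(maxlen=kappa + 1)  # effects older than kappa are dropped
state = 0.0
for t, a in enumerate([1.0, 0.0, 0.0, 0.0, 0.0]):  # a single dose at t = 0
    recent_actions.appendleft(a)
    state = pae_step(state, recent_actions, delta)
    print(t, state)  # the single dose keeps raising the state until t > kappa
```

Note that the transition depends on the whole buffer of recent actions, not only on $(s_t, a_t)$, which is exactly the violation of the Markov property described above.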
4.2 Converting PAE-POMDPs into MDPs

In this section, we present the specific knowledge used to restore the Markov assumptions in drug dosing problems, and we show how, by leveraging it, we can effectively convert a PAE-POMDP into an MDP.

Action and effect equivalence. We start by noting that, as per the pharmacodynamics axioms, drug effects are determined by the drug concentration at the site of action (Holford 1984, 2018). Therefore, action and effect are considered equivalent and are used interchangeably hereinafter.

Exponential rate of decay. Motivated by the pharmacology (Benet and Zia-Amirhosseini 1995; Dasgupta and Krasowski 2020), biotechnology (Hobbie and Roth 2007) and chemical kinetics (Atkins and de Paula 2006) literature, we adopt an exponential decay model of drug concentration over time (Annamalai 2010). In particular, we assume that the initial action effect decays at a constant rate $\lambda$, which is specific to the environment (drug as well as individual). Formally, we assume that $a_{t'} = \lambda\, a_{t'-1}$ for $t < t' \le t + \kappa$. Note that here we implicitly assume that the action's effect can safely be ignored after $\kappa$ time steps. This assumption is valid in the context of drug dosing, since drugs are ineffective below a certain concentration. The observation of the agent at a particular time step can therefore be defined conditional on the current state and on all the past actions: o_{t+1} ∼ O(· | s_t, a
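Under the exponential-decay assumption, the entire action history collapses into a single scalar of residual effect that can be updated multiplicatively and appended to the observation, restoring the Markov property. The following is a minimal sketch of this bookkeeping under the assumptions above; the names (`update_residual`, `lam`) and the threshold value are ours, not the paper's:

```python
def update_residual(residual, action, lam, threshold=1e-3):
    """Fold the newest action into the residual effect, then decay it:
    e_{t+1} = lam * (e_t + a_t).
    Residuals below `threshold` are zeroed out, reflecting that drugs
    are ineffective below a certain concentration."""
    residual = lam * (residual + action)
    return residual if residual > threshold else 0.0

# Augmenting each observation with the residual yields a Markovian
# state (o_t, e_t) that any off-the-shelf RL agent can consume.
lam, residual = 0.5, 0.0
for t, action in enumerate([1.0, 0.0, 0.0, 0.0]):  # a single dose at t = 0
    residual = update_residual(residual, action, lam)
    augmented_obs = ("o_t placeholder", residual)
    print(t, residual)  # 0.5, 0.25, 0.125, 0.0625
```

The design choice here is that one multiply-and-add per step replaces storing $\kappa$ past actions, which is what makes the converted MDP cheap enough for real-time dosing control.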