diff --git a/-tFQT4oBgHgl3EQf7DaV/content/2301.13441v1.pdf b/-tFQT4oBgHgl3EQf7DaV/content/2301.13441v1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..2972f578aca698152294704f60854f50488d86ca --- /dev/null +++ b/-tFQT4oBgHgl3EQf7DaV/content/2301.13441v1.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0172ae898a13d5ff658ea251ab85c0b3c897f64017b55cde5753fa996468cd6 +size 1044862 diff --git a/-tFQT4oBgHgl3EQf7DaV/vector_store/index.faiss b/-tFQT4oBgHgl3EQf7DaV/vector_store/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..9594bd37f38c257089dbf111ee12237277dd3668 --- /dev/null +++ b/-tFQT4oBgHgl3EQf7DaV/vector_store/index.faiss @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e37490f897f2e4699be69957be9ea39a71a982446fb4c4c980bf20eb22b09280 +size 5373997 diff --git a/.gitattributes b/.gitattributes index f332f93ce49cb57798d9500011a12717b9801c5c..cb527d5e8ac43921f217035ffb2e4c268f543733 100644 --- a/.gitattributes +++ b/.gitattributes @@ -8400,3 +8400,54 @@ ytFLT4oBgHgl3EQfnC-z/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -tex 79E1T4oBgHgl3EQfBwKM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text ANE0T4oBgHgl3EQfxgKB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text ztAzT4oBgHgl3EQftf2q/content/2301.01677v1.pdf filter=lfs diff=lfs merge=lfs -text +ddE3T4oBgHgl3EQfGwlg/content/2301.04317v1.pdf filter=lfs diff=lfs merge=lfs -text +bdE4T4oBgHgl3EQfPQyn/content/2301.04972v1.pdf filter=lfs diff=lfs merge=lfs -text +idFJT4oBgHgl3EQfWizT/content/2301.11518v1.pdf filter=lfs diff=lfs merge=lfs -text +-tFQT4oBgHgl3EQf7DaV/content/2301.13441v1.pdf filter=lfs diff=lfs merge=lfs -text +sdAzT4oBgHgl3EQfPPvE/content/2301.01181v1.pdf filter=lfs diff=lfs merge=lfs -text +stE0T4oBgHgl3EQfrgHc/content/2301.02568v1.pdf filter=lfs diff=lfs merge=lfs -text +D9FRT4oBgHgl3EQfxziA/content/2301.13643v1.pdf filter=lfs diff=lfs merge=lfs -text +s9E3T4oBgHgl3EQfjgr4/content/2301.04590v1.pdf filter=lfs diff=lfs merge=lfs -text +AtE2T4oBgHgl3EQfnAjp/content/2301.04005v1.pdf filter=lfs diff=lfs merge=lfs -text +DNE3T4oBgHgl3EQfUwpD/content/2301.04453v1.pdf filter=lfs diff=lfs merge=lfs -text +AtE2T4oBgHgl3EQfnAjp/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ndAzT4oBgHgl3EQfqf0O/content/2301.01628v1.pdf filter=lfs diff=lfs merge=lfs -text +-tFQT4oBgHgl3EQf7DaV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +J9FIT4oBgHgl3EQfZytY/content/2301.11254v1.pdf filter=lfs diff=lfs merge=lfs -text +WdE0T4oBgHgl3EQfmAG-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +UNE3T4oBgHgl3EQfagoR/content/2301.04506v1.pdf filter=lfs diff=lfs merge=lfs -text +sNE1T4oBgHgl3EQfjQRT/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +D9FRT4oBgHgl3EQfxziA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +bdE4T4oBgHgl3EQfPQyn/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +vdAzT4oBgHgl3EQfP_uD/content/2301.01193v1.pdf filter=lfs diff=lfs merge=lfs -text +sdAzT4oBgHgl3EQfPPvE/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +n9E_T4oBgHgl3EQf8Byb/content/2301.08373v1.pdf filter=lfs diff=lfs merge=lfs -text +XNAzT4oBgHgl3EQf1v6R/content/2301.01804v1.pdf filter=lfs diff=lfs merge=lfs -text +stE1T4oBgHgl3EQfjgQE/content/2301.03262v1.pdf filter=lfs diff=lfs merge=lfs -text +zNAyT4oBgHgl3EQf0vni/content/2301.00725v1.pdf filter=lfs diff=lfs merge=lfs -text +ddE2T4oBgHgl3EQfGAap/content/2301.03653v1.pdf filter=lfs diff=lfs merge=lfs 
-text +QNE3T4oBgHgl3EQfCwmU/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +6NE5T4oBgHgl3EQfPg5N/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +TtE0T4oBgHgl3EQf2QKO/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +b9AyT4oBgHgl3EQf-PqC/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +SNAzT4oBgHgl3EQfJPtx/content/2301.01076v1.pdf filter=lfs diff=lfs merge=lfs -text +udFKT4oBgHgl3EQf3i5N/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +zdAzT4oBgHgl3EQfC_pK/content/2301.00968v1.pdf filter=lfs diff=lfs merge=lfs -text +ptE4T4oBgHgl3EQfvQ3k/content/2301.05241v1.pdf filter=lfs diff=lfs merge=lfs -text +_tE1T4oBgHgl3EQf8wWT/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +YtFOT4oBgHgl3EQf-DRf/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +2dAyT4oBgHgl3EQfPvZl/content/2301.00030v1.pdf filter=lfs diff=lfs merge=lfs -text +zdAzT4oBgHgl3EQfC_pK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +1tE0T4oBgHgl3EQfdgCu/content/2301.02378v1.pdf filter=lfs diff=lfs merge=lfs -text +ddE2T4oBgHgl3EQfGAap/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +UNE3T4oBgHgl3EQfagoR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +cNAzT4oBgHgl3EQfnf3Z/content/2301.01583v1.pdf filter=lfs diff=lfs merge=lfs -text +Y9FRT4oBgHgl3EQfPDf9/content/2301.13516v1.pdf filter=lfs diff=lfs merge=lfs -text +htAyT4oBgHgl3EQfXvc6/content/2301.00188v1.pdf filter=lfs diff=lfs merge=lfs -text +u9E3T4oBgHgl3EQfNwnN/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +ptE4T4oBgHgl3EQfvQ3k/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +MNE1T4oBgHgl3EQfHAOD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +_tA0T4oBgHgl3EQfPf_J/content/2301.02177v1.pdf filter=lfs diff=lfs merge=lfs -text +vdAzT4oBgHgl3EQfP_uD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text +MNE1T4oBgHgl3EQfHAOD/content/2301.02921v1.pdf filter=lfs diff=lfs merge=lfs -text +VtE4T4oBgHgl3EQfng0j/content/2301.05176v1.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/09FQT4oBgHgl3EQfEDVH/content/tmp_files/2301.13236v1.pdf.txt b/09FQT4oBgHgl3EQfEDVH/content/tmp_files/2301.13236v1.pdf.txt new file mode 100644 index 0000000000000000000000000000000000000000..2c508a16aaf7301bf000d68589f6ef715466ef1d --- /dev/null +++ b/09FQT4oBgHgl3EQfEDVH/content/tmp_files/2301.13236v1.pdf.txt @@ -0,0 +1,2222 @@ +SoftTreeMax: Exponential Variance Reduction in Policy Gradient +via Tree Search +Gal Dalal * Assaf Hallak * Gugan Thoppe Shie Mannor Gal Chechik +Abstract +Despite the popularity of policy gradient meth- +ods, they are known to suffer from large vari- +ance and high sample complexity. To mitigate +this, we introduce SoftTreeMax – a generaliza- +tion of softmax that takes planning into account. +In SoftTreeMax, we extend the traditional logits +with the multi-step discounted cumulative reward, +topped with the logits of future states. We con- +sider two variants of SoftTreeMax, one for cumu- +lative reward and one for exponentiated reward. +For both, we analyze the gradient variance and +reveal for the first time the role of a tree expan- +sion policy in mitigating this variance. We prove +that the resulting variance decays exponentially +with the planning horizon as a function of the +expansion policy. Specifically, we show that the +closer the resulting state transitions are to uni- +form, the faster the decay. 
In a practical imple- +mentation, we utilize a parallelized GPU-based +simulator for fast and efficient tree search. Our +differentiable tree-based policy leverages all gra- +dients at the tree leaves in each environment step +instead of the traditional single-sample-based gra- +dient. We then show in simulation how the vari- +ance of the gradient is reduced by three orders +of magnitude, leading to better sample complex- +ity compared to the standard policy gradient. On +Atari, SoftTreeMax demonstrates up to 5x better +performance in a faster run time compared to dis- +tributed PPO. Lastly, we demonstrate that high +reward correlates with lower variance. +1. Introduction +Policy Gradient (PG; Sutton et al. 1999) methods for Re- +inforcement Learning (RL) are often the first choice for +environments that allow numerous interactions at a fast pace +(Schulman et al., 2017). Their success is attributed to several +*Equal contribution . +Correspondence to: +Gal Dalal +, Assaf Hallak . +Preperint. +factors, including that they are easy-to-distribute to multiple +workers, require no assumptions on an underlying value +function, and have both on-policy and off-policy variants. +Despite their popularity, PG algorithms are also notoriously +unstable since they compute gradients over entire trajec- +tories (Liu et al., 2020; Xu et al., 2020). As a result, PG +algorithms tend to be highly inefficient in terms of sample +complexity. Several solutions were proposed to mitigate the +instability of PG methods, including baseline subtraction +(Greensmith et al., 2004; Weaver & Tao, 2001; Thomas & +Brunskill, 2017; Wu et al., 2018), anchor-point averaging +(Papini et al., 2018), and other variance reduction techniques +(Zhang et al., 2021; Shen et al., 2019; Pham et al., 2020). +A second family of algorithms that achieved state-of-the-art +results in several domains is based on planning (Silver et al., +2016; Ye et al., 2021). Planning is exercised primarily in the +context of value-based RL and is usually implemented using +a Tree Search (TS; Coulom 2006; Silver 2009). In this work, +we combine PG with TS by introducing a parameterized dif- +ferentiable policy that incorporates tree expansion. Namely, +our SoftTreeMax policy replaces the standard policy logits +of a state and action, with the expected value of trajectories +that originate from these state and action. +Combining TS into PG suite should be done with care given +the biggest hurdle of PG – its high gradient variance. This +raises prominent actionable questions that were ignored un- +til this work: How does the tree-expansion policy affect +the PG variance? And, can we design tree-expansion that +is guaranteed to strongly reduces that variance? Here, we +analyze the gradient variance of SoftTreeMax, and provide +a practical methodology to choose the expansion policy to +minimize the resulting variance. Our main result shows that +a desirable expansion policy is one that induces transitions +as close to uniform as possible. More generally, we show +that the gradient variance of SoftTreeMax decays at an expo- +nential rate of |λ2|d, where d is the tree depth and λ2 is the +second eigenvalue of the transition matrix induced by the +tree expansion policy. This paper is the first to prove such a +relation between PG variance and TS expansion policy. 
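To make the policy described above concrete, below is a minimal tabular sketch of the idea for a small MDP whose transition and reward tensors are known. It is illustrative only: the formal definitions appear in Section 3, and all identifiers (`c_softtreemax`, `P`, `r`, `pi_b`, `theta`) are ours rather than part of the paper's released code.

```python
import numpy as np

def c_softtreemax(P, r, pi_b, theta, s, d, beta, gamma):
    """C-SoftTreeMax distribution over actions at state s (tabular sketch, assumes d >= 1).

    P:     (S, A, S) transition probabilities
    r:     (S, A)    immediate rewards
    pi_b:  (S, A)    behavior / tree-expansion policy used inside the tree
    theta: (S,)      parametric score applied at the leaves
    """
    A = P.shape[1]
    P_b = np.einsum("sa,sax->sx", pi_b, P)      # transition matrix induced by pi_b
    r_b = np.einsum("sa,sa->s", pi_b, r)        # expected one-step reward under pi_b

    logits = np.zeros(A)
    for a in range(A):
        dist = P[s, a]                          # distribution of s_1 after taking a at s
        ret = r[s, a]                           # reward at t = 0
        for t in range(1, d):
            ret += (gamma ** t) * dist @ r_b    # expected reward collected under pi_b at step t
            dist = dist @ P_b                   # roll the state distribution one step forward
        ret += (gamma ** d) * dist @ theta      # leaf score theta(s_d)
        logits[a] = ret

    w = np.exp(beta * (logits - logits.max()))  # softmax over the tree-based logits
    return w / w.sum()
```

In the deep implementation of Section 5, a learned network W_theta replaces the tabular `theta` at the leaves.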
+Common practices for expanding the tree rely on a value +estimate, using UCT (Kocsis & Szepesv´ari, 2006; Browne +et al., 2012), or based on some prior knowledge such as +arXiv:2301.13236v1 [cs.LG] 30 Jan 2023 + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +human-collected trajectories (Silver et al., 2018). Our work +raises the question of whether optimal variance reduction +corresponds to the appealing regret properties of UCT. +To verify our results, we implemented a practical version of +SoftTreeMax that exhaustively searches the entire tree and +applies a neural network on its leaves. We test our algorithm +in the Atari domain, where it is possible to span the whole +tree of the (nearly) deterministic Atari environment. Hence, +the gradient variance has no sampling component, and our +variance calculations indeed match the empirical gradient +variance. Our search mechanism uses a GPU simulator that +allows multiple copies of the environment to be run in par- +allel (Dalton et al., 2020). To enable a tractable deep search, +up to depth eight, we also introduce a pruning technique +that limits the width of the tree. We do so by sampling only +the most promising nodes at each level. +We integrate our SoftTreeMax GPU implementation into +the popular PPO (Schulman et al., 2017) and compare it to +the flat distributed variant of PPO. For a fair comparison, +we also run the distributed PPO baseline with the parallel +GPU emulator by Dalton et al. (2020). In all tested Atari +games, our results outperform the baseline and obtain up +to 5x more reward. We further show in Section 6 that the +associated gradient variance is smaller by three orders of +magnitude in all games, demonstrating the relation between +low gradient variance and high reward. +We summarize our key contributions: +1. We explore the relation between two seemingly unre- +lated families of SoTA approaches: PG and TS, and +show how they can be combined. +2. We introduce SoftTreeMax: A novel parametric policy +that generalizes softmax to planning. We propose both +cumulative and exponentiated reward variants. +3. We prove that the gradient variance of SoftTreeMax in +its two variants decays exponentially with its TS depth. +Our analysis sheds new light on the choice of tree +expansion policy. It raises the question of optimality in +terms of variance versus the previously studied regret. +4. We implement a differentiable deep version of +SoftTreeMax that employs a parallelized GPU TS. We +demonstrate how its gradient variance is reduced by +three orders of magnitude over PPO while obtaining +up to 5x reward. +2. Preliminaries +We follow the standard notation by (Puterman, 2014). Con- +sider a discounted Markov Decision Process (MDP) M = +(S, A, P, r, γ), where S is a finite state space of size S, A +is a finite action space of size A, r : S × A → [0, 1] is +the reward function, P : S × A → ∆S is the transition +function, and γ ∈ (0, 1) is the discount factor. In vector +form, denote the transition matrix starting from state s by +[Ps]a,s′ = Pr(s′|a, s) ∈ [0, 1]A×S, and the corresponding +reward vector by Rs = r(s, ·) ∈ RA. +Let π : S → ∆A be a stationary policy. We define the in- +duced transition matrix P π(s′|s) = � +a π(a|s) Pr(s′|s, a) +and reward function Rπ(s) = � +a π(a|s)r(s, a). +De- +note by µπ ∈ RS the stationary distribution of P π, s.t. +µ⊤ +π P π = P π. 
Also, let V π ∈ RS be the value function of +π defined by V π(s) = Eπ [�∞ +t=0 γtr (st, π(st)) | s0 = s], +and let Qπ ∈ RS×A be the Q-function such that Qπ(s, a) = +Eπ [r(s, a) + γV π(s′)]. +Our goal is to find an optimal policy π⋆ such that +V ⋆(s) ≡ V π⋆(s) = max +π +V π(s), +∀s ∈ S. +Lastly, for the analysis in Section 4, we introduce the follow- +ing vector notation. Denote by Θ ∈ RS the vector represen- +tation of θ(s) ∀s ∈ S. For a vector u, denote by exp(u) the +coordinate-wise exponent of u and by D(u) the diagonal +square matrix with u in its diagonal. For matrix A, denote +its i-th eigenvalue by λi(A). Denote the k-dimensional iden- +tity matrix and all-ones vector by Ik and 1k, respectively. +We denote the trace operator by Tr . Finally, We treat all +vectors as column vectors. +2.1. Policy Gradient +PG schemes seek to maximize the cumulative reward as a +function of the parameterized policy πθ(a|s) by perform- +ing gradient steps on θ. The celebrated Policy Gradient +Theorem (Sutton et al., 1999) states that +∂ +∂θ +� +µ⊤ +πθV πθ� += Es∼µπθ ,a∼πθ(·|s) [∇θ log πθ(a|s)Qπθ(s, a)] . +The variance of the gradient is thus +Vars∼µπθ ,a∼πθ(·|s) (∇θ log πθ(a|s)Qπθ(s, a)) . +(1) +In the notation above, we denote the variance of a vector +random variable X by: +Varx (X) = Tr +� +Ex +� +(X − ExX)⊤ (X − ExX] +�� +, +similarly as in (Greensmith et al., 2004). From here on, we +drop the subscript from Var in (1) for brevity. +When the action space is discrete, a commonly used param- +eterized policy is softmax: +πθ(a|s) ∝ exp (θ(s, a)) , +where θ : S × A → R is a state-action parametrization. + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +3. SoftTreeMax: Exponent of trajectories +We introduce a new family of policies called SoftTreeMax, +which are a model-based generalization of the popu- +lar softmax. +We propose two variants: +Cumulative +(C-SoftTreeMax) and Exponentiated (E-SoftTreeMax). In +both variants, we replace the generic softmax logits θ(s, a) +with the score of a trajectory of horizon d starting from +s, a, generated by applying a behavior policy πb. +In +C-SoftTreeMax, we exponentiate the expectation of the log- +its. In E-SoftTreeMax, we first exponentiate the logits, and +only then compute their expectation. +Logits. Let the SoftTreeMax logit ℓs,a(d; θ) be a random +variable depicting the score of a trajectory of horizon d +starting from s, a and following the policy πb : +ℓs,a(d; θ) = +d−1 +� +t=0 +γtrt + γdθ(sd). +(2) +Namely, s0 = s, a0 = a, at ∼ πb(·|st) ∀t ≥ 1, and +rt ≡ r (st, at) . For brevity of the analysis, we let the para- +metric score θ in (2) be state-based, similarly to a value +function. Instead, one could use a state-action input analo- +gous to a Q-function. This freedom allows easy integration +of SoftTreeMax to the two types of RL algorithm imple- +mentations in standard packages. +C-SoftTreeMax. Given an inverse temperature parameter +β, let C-SoftTreeMax be +πC +d,θ(a|s) ∝ exp [βEπbℓs,a(d; θ)] . +(3) +C-SoftTreeMax gives higher weight for actions that result +in higher expected returns. While standard softmax relies +entirely on parametrization θ, C-SoftTreeMax also interpo- +lates a Monte-Carlo portion of the reward. +Using the monotone convergence theorem (since rewards +are non-negative), it follows that when d → ∞, +πC +d→∞,θ(a|s) ∝ exp [βQπb(s, a)] , +corresponding to Boltzmann exploration (Sutton et al., 1999) +using the behavior policy πb. +E-SoftTreeMax. 
A second natural operator to consider is +E-SoftTreeMax, in which the expectation is taken outside +the exponent: +πE +d,θ(a|s) ∝ Eπb exp [(βℓs,a(d; θ))] . +(4) +This objective corresponds to the exponentiated reward ob- +jective which is often used for risk-sensitive RL (Howard +& Matheson, 1972; Fei et al., 2021; Noorani & Baras, +2021). The common risk-sensitive objective is of the form +log E[exp(δR)], where δ is the risk parameter and R is the +cumulative reward. Similarly to that literature, the exponent +in (4) emphasizes the most promising trajectories. +SoftTreeMax properties. SoftTreeMax is a natural model- +based generalization of softmax. For d = 0, both variants +above coincide, since (2) becomes deterministic. In that +case and for a state-action parametrization, they reduce +to standard softmax. When β → 0, both variants again +coincide and sample actions uniformly (exploration). When +β → ∞, the policies become deterministic and greedily +optimize for the best trajectory (exploitation). The best +trajectory is in expectation in the case of C-SoftTreeMax, +and in terms of best sample-path for E-SoftTreeMax. +SoftTreeMax convergence. Under regularity conditions, +for any parametric policy, PG converges to local optima +(Bhatnagar et al., 2009), and thus also SoftTreeMax. Specif- +ically for softmax PG, asymptotic (Agarwal et al., 2021) and +rate results (Mei et al., 2020b) were recently obtained. A fu- +ture direction would be to extend those for the convergence +properties of SoftTreeMax. +SoftTreeMax gradient. The two variants of SoftTreeMax +involve an expectation. This expectation is taken over Sd +many trajectories from the root state s and are weighted +according to their probability. Thus, during the PG train- +ing process, the gradient ∇θ log πθ is calculated using a +weighted sum of gradients over all reachable states starting +from s. Our method exploits the exponential number of tra- +jectories to reduce the variance. Indeed, in the next section +we prove that the gradient variance of SoftTreeMax decays +exponentially fast as a function of the behavior policy πb and +trajectory length d. In the experiments in Section 6, we also +show how the practical version of SoftTreeMax achieves +a significant reduction in the noise of the PG process and +leads to faster convergence and higher reward. +4. Theoretical Analysis +In this section, we bound the variance of PG when using +SoftTreeMax policy. Specifically, we show that the variance +decreases exponentially with the tree depth, where the rate +is determined by the second eigenvalue of the transition +kernel induced by πb. We analyze the gradient variance w.r.t. +state-action frequencies, as a function of problem param- +eters. Other types of analyses could have instead focused +on the estimation aspect in the context of sampling. Indeed, +in our implementation in Section 5, we manage to avoid +sampling and directly compute the expectations in Eqs. (3) +and (4). As we show later, we do so by leveraging efficient +parallel simulation on the GPU in feasible run-time. In our +application, due to the nature of the finite action space and +quasi-deterministic Atari dynamics (Bellemare et al., 2013), +our expectation estimator is noiseless. We encourage future +work to account for the finite-sample variance component. + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +We begin with a general variance bound that holds for any +parametric policy. 
We defer all the proofs in this section to +Appendix A.1. +Lemma 4.1 (Bound on the policy gradient variance). For +any parametric policy πθ and function Qπθ : S × A → R, +Var (∇θ log πθ(a|s)Qπθ(s, a)) +≤ max +s,a [Qπθ(s, a)]2 max +s +||∇θ log πθ(·|s)||2 +F , +where ∇θ log πθ(·|s) ∈ RA×dim(θ) is a matrix whose a-th +row is ∇θ log πθ(a|s)⊤. +Hence, to bound (1), it is sufficient to bound the Frobenius +norm of the policy gradient ∇θ log πθ(·|s) for any s. +A common assumption in the RL literature (Szepesv´ari, +2010) that we adopt for the remainder of the section is that +the transition matrix P πb, induced by the behavior policy +πb, is irreducible and aperiodic. Subsequently, its second +highest eigenvalue holds: |λ2(P πb)| < 1. +From here on, we split the variance results for the +two variants of SoftTreeMax to two subsections. +For +C-SoftTreeMax, the analysis is simpler and we provide an +exact bound. The case of E-SoftTreeMax is more involved +and we provide for it a more general result. In both cases, +we show that the variance decays exponentially with the +planning horizon. +4.1. Variance of C-SoftTreeMax +We express C-SoftTreeMax in vector form as follows. +Lemma 4.2 (Vector form of C-SoftTreeMax). For d ≥ 1, +(3) is given by +πC +d,θ(·|s) = +exp +� +β +� +Cs,d + γdPs (P πb)d−1 Θ +�� +1⊤ +A exp +� +β +� +Cs,d + γdPs (P πb)d−1 Θ +��, +(5) +where +Cs,d = Rs + Ps +�d−1 +� +h=1 +γh (P πb)h−1 +� +Rπb. +(6) +The matrix Cs,d ∈ RA×S represents the cumulative dis- +counted reward in expectation along the trajectory of hori- +zon d. Starting from the state s, the reward Rs is collected +and a transition occurs according to Ps. Then, the policy +πb is applied to obtain the reward Rπb and transition, and +the process repeats. When depth d is reached, we apply the +score function on the last state as depicted in (5). +Next, we express the policy gradient of C-SoftTreeMax +Lemma +4.3 +(Gradient +of +C-SoftTreeMax). +The +C-SoftTreeMax gradient of dimension A × S is given by +∇θ log πC +d,θ = βγd � +IA − 1A(πC +d,θ)⊤� +Ps (P πb)d−1 , +where for brevity, we drop the s index in the policy above, +i.e., πC +d,θ ≡ πC +d,θ(·|s). +We are now ready to present our first main result: +Theorem +4.4 +(Exponential +variance +decay +of +C-SoftTreeMax). For every Q +: +S × A +→ +R, the +C-SoftTreeMax policy gradient is bounded by +Var +� +∇θ log πC +d,θ(a|s)Q(s, a) +� +≤ 2 A2S2β2 +(1 − γ)2 γ2d|λ2(P πb)|2(d−1). +Although we provide a rigorous proof in Appendix A.1.4, +since the proof relatively accessible, we briefly outline its +essence here. +Proof outline. Lemma 4.1 allows us to bound the variance +using a direct bound on the gradient norm. The gradient is +given in Lemma 4.3 as a product of three matrices, which +we now study from right to left. The matrix P πb is a row- +stochastic matrix. Because the associated Markov chain is +irreducible and aperiodic, it has a unique stationary distribu- +tion. This implies that P πb has one and only one eigenvalue +equal to 1; all others have magnitude strictly less than 1. Let +us suppose that all these other eigenvalues have multiplicity +1 (the general case with repeated eigenvalues can be handled +via Jordan decompositions as in (Pelletier, 1998, Lemma1)). +Then, P πb has the spectral decomposition +P πb = 1Sµ⊤ +πb + +S +� +i=2 +λiviu⊤ +i , +where λi is the i-th eigenvalue of P πb (ordered in descend- +ing order according to their magnitude) and ui and vi are +the corresponding left and right eigenvectors, respectively. 
Therefore,

(P^{\pi_b})^{d-1} = \mathbf{1}_S \mu_{\pi_b}^\top + \sum_{i=2}^{S} \lambda_i^{d-1} v_i u_i^\top.   (7)

The second matrix in the gradient relation in Lemma 4.3, P_s, is a rectangular transition matrix that translates the vector of all ones from dimension S to A: P_s \mathbf{1}_S = \mathbf{1}_A. Lastly, the first matrix, (I_A - \mathbf{1}_A (\pi^C_{d,\theta})^\top), is a projection whose null-space includes the vector \mathbf{1}_A, i.e., (I_A - \mathbf{1}_A (\pi^C_{d,\theta})^\top) \mathbf{1}_A = 0.

Combining the three properties above when multiplying the three matrices of the gradient, it is easy to see that the first term in (7) is canceled, and we are left with bounded summands scaled by \lambda_i(P^{\pi_b})^{d-1}. Recalling that |\lambda_i(P^{\pi_b})| < 1 and that |\lambda_2| > |\lambda_3| > \dots for i = 2, \dots, S, we obtain the desired result.

[Figure 1 appears here: gradient variance (log scale) vs. depth d, showing the true variance and the variance bound for each of the three P^{\pi_b} cases.]

Figure 1. A comparison of the analytical PG variance and our bound for E-SoftTreeMax on randomly drawn MDPs. We present three cases for P^{\pi_b}: (i) close to uniform, (ii) drawn randomly, and (iii) close to a permutation matrix. This experiment verifies the optimal and worst-case rate-decay cases. The variance bounds here are taken from Theorem 4.7, where we substitute \alpha = |\lambda_2(P^{\pi_b})|.

Theorem 4.4 guarantees that the variance of the gradient decays exponentially with d, regardless of \gamma. It also provides a novel insight that drives us to choose the behavior policy \pi_b as the policy that minimizes the absolute second eigenvalue of P^{\pi_b}. Indeed, the second eigenvalue of a Markov chain has known connections to its connectivity and its rate of convergence to the stationary distribution (Levin & Peres, 2017).

Optimal variance decay. To achieve the best reduction in variance, the behavior policy \pi_b should be chosen to achieve uniformity, i.e., so that the transitions induced by the interaction of \pi_b with the environment are uniform. In that case, P^{\pi_b} is a rank-one matrix of the form \mathbf{1}_S \mu_{\pi_b}^\top, and \lambda_2(P^{\pi_b}) = 0. Then, Var(\nabla_\theta \log \pi_\theta(a|s) Q(s,a)) = 0. As we show in Section 5, we choose our tree expansion policy accordingly.

Worst-case variance decay. In contrast, and somewhat surprisingly, when \pi_b is chosen so that the dynamics are deterministic, there is no guarantee that the variance decays exponentially fast. For example, if P^{\pi_b} is a permutation matrix, then \lambda_2(P^{\pi_b}) = 1, and advancing the tree amounts to only updating the gradient of one state for every action, as in the basic softmax.

4.2. Variance of E-SoftTreeMax

The proof of the variance bound for E-SoftTreeMax is similar to that of C-SoftTreeMax, but more involved. It also requires the assumption that the reward depends only on the state, i.e., r(s,a) \equiv r(s). This is indeed the case in most standard RL environments such as Atari and MuJoCo.

We begin with expressing E-SoftTreeMax in vector form.

Lemma 4.5 (Vector form of E-SoftTreeMax). For d \geq 1, (4) is given by

\pi^E_{d,\theta}(\cdot|s) = \frac{E_{s,d}\exp(\beta\gamma^d\Theta)}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)},   (8)

where

E_{s,d} = P_s \prod_{h=1}^{d-1} \left[ D\!\left(\exp(\beta\gamma^h R)\right) P^{\pi_b} \right].   (9)

The vector R above is the S-dimensional vector whose s-th coordinate is r(s).

The matrix E_{s,d} \in \mathbb{R}^{A \times S} has a similar role to C_{s,d} from (6), but it represents the exponentiated cumulative discounted reward. Accordingly, it is a product of d matrices as opposed to a sum.
It captures the expected reward sequence starting +from s and then iteratively following P πb. After d steps, we +apply the score function on the last state as in (8). +Lemma +4.6 +(Gradient +of +E-SoftTreeMax). +The +E-SoftTreeMax gradient of dimension A × S is given by +∇θ log πE +d,θ = +βγd � +IA − 1A(πE +d,θ)⊤� D +� +πE +d,θ +�−1 +Es,dD(exp(βγdΘ)) +1⊤ +AEs,d exp(βγdΘ) +, +where for brevity, we drop the s index in the policy above, +i.e., πE +d,θ ≡ πE +d,θ(·|s). +This gradient structure is harder to handle than that of +C-SoftTreeMax in Lemma 4.3, but here we also prove an +exponential variance decay nonetheless. +Theorem +4.7 +(Exponential +variance +decay +of +E-SoftTreeMax). There exists α +∈ +(0, 1) such that, +for any function Q : S × A → R, +Var +� +∇θ log πE +d,θ(a|s)Q(s, a) +� +∈ O +� +β2γ2dα2d� +. +If all rewards are equal (r ≡ const), then α = |λ2(P πb)|. +The proof structure is similar in spirit to that of Theorem 4.4, +but several new technical arguments are needed. We give it +in full in Appendix A.1.4, but briefly outline it here. +Proof outline. Recall that thanks to Lemma 4.1, we can +bound the PG variance using a direct bound on the gradient +norm. The definition of the induced norm is +∥∇θ log πE +d,θ∥ = max +z:∥z∥=1 ∥∇θ log πE +d,θz∥, + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +with ∇θ log πE +d,θ given in Lemma 4.6. Let z ∈ RS be an +arbitrary vector such that ∥z∥ = 1. Then, z = �S +i=1 cizi, +where ci are scalar coefficients and zi are vectors spanning +the S-dimensional space. In the full proof, we show our +specific choice of zi and prove they are linearly independent +given that choice. We do note that z1 = 1S. +The first part of the proof relies on the fact that +(∇θ log πE +d,θ)z1 = 0. This is easy to verify using Lemma 4.6 +together with (8), and because +� +IA − 1A(πE +d,θ)⊤� +is a pro- +jection matrix whose null-space is spanned by 1S. Thus, +∇θ log πE +d,θz = ∇θ log πE +d,θ +S +� +i=2 +cizi. +In the second part of the proof, we focus on Es,d from (9), +which appears within ∇θ log πE +d,θ. Notice that Es,d consists +of the product �d−1 +h=1 +� +D +� +exp(βγhR +� +P πb� +. Even though +the elements in this product are not stochastic matrices, in +the full proof we show how to normalize each of them to a +stochastic matrix Bh. We thus obtain that +Es,d = PsD(M1) +d−1 +� +h=1 +Bh, +where M1 ∈ RS is some strictly positive vector. Then, +we can apply a result by Mathkar & Borkar (2016), which +itself builds on (Chatterjee & Seneta, 1977). The result +states that the product of stochastic matrices �d−1 +h=1 Bh of +our particular form converges exponentially fast to a matrix +of the form 1Sµ⊤ s.t. ∥1Sµ⊤ − �d−1 +h=1 Bh∥ ≤ Cαd for +some constant C. +Lastly, 1Sµ⊤ +πb gets canceled due to our choice of zi, i = +2, . . . , S. This observation along with the above fact that the +remainder decays then shows that ∇θ log πE +d,θ +�S +i=2 zi = +O(αd), which gives the desired result. +Although our proof guarantees that α = |λ2(P πb)| only +in the constant-reward case, we conjecture that this is also +true in the general case. To demonstrate this, we run the +following simulation. We drew a random finite MDP, pa- +rameter vector Θ ∈ RS ++, and behavior policy πb. We then +analytically computed the PG variance of E-SoftTreeMax +as given in (1) and compared it to |λ2(P πb)|d. As seen, the +true variance and our bound matched almost identically. +This suggests that indeed α = |λ2(P πb)|. 
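As a lightweight stand-in for the full check, the following sketch runs the analogous comparison for the C-SoftTreeMax variant, for which Lemmas 4.2 and 4.3 give closed forms: it computes the exact gradient of the log-policy and prints its Frobenius norm next to the predicted decay factor \beta\gamma^d |\lambda_2(P^{\pi_b})|^{d-1} (constants omitted). The setup and all identifiers are illustrative assumptions, not the code used for the paper's experiments.

```python
import numpy as np

rng = np.random.default_rng(0)
S, A, beta, gamma = 20, 4, 1.0, 0.99

P = rng.dirichlet(np.ones(S), size=(S, A))          # P[s, a] is a distribution over next states
r = rng.random((S, A))                              # rewards
theta = rng.random(S)                               # parametric leaf score
pi_b = rng.dirichlet(np.ones(A), size=S)            # behavior / tree-expansion policy

P_b = np.einsum("sa,sax->sx", pi_b, P)              # induced transition matrix P^{pi_b}
r_b = np.einsum("sa,sa->s", pi_b, r)                # expected one-step reward under pi_b
lam2 = np.sort(np.abs(np.linalg.eigvals(P_b)))[-2]  # |lambda_2(P^{pi_b})|

s = 0
for d in range(1, 9):
    Pb_pow = np.linalg.matrix_power(P_b, d - 1)
    # C-SoftTreeMax logits at state s, in the vector form of Lemma 4.2.
    disc = sum((gamma**h * np.linalg.matrix_power(P_b, h - 1) for h in range(1, d)),
               np.zeros((S, S)))
    logits = beta * (r[s] + P[s] @ disc @ r_b + gamma**d * P[s] @ Pb_pow @ theta)
    pi = np.exp(logits - logits.max())
    pi /= pi.sum()
    # Gradient of log pi^C_{d,theta}(.|s) w.r.t. theta (Lemma 4.3) and its Frobenius norm,
    # printed next to the predicted decay factor beta * gamma^d * |lambda_2|^(d-1).
    grad = beta * gamma**d * (np.eye(A) - np.outer(np.ones(A), pi)) @ P[s] @ Pb_pow
    print(d, np.linalg.norm(grad), beta * gamma**d * lam2 ** (d - 1))
```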
We repeat this experiment three times for different P^{\pi_b}: (i) close to uniform, (ii) drawn randomly, and (iii) close to a permutation matrix. The three cases match our takeaways on the optimal and worst-case rate-decay cases. We ran multiple such experiments and in all of them the lines match closely; we give here one such instance. To account for the constants, we match the values at the first point, d = 1.

[Figure 2 appears here: a depth-2 tree rooted at s_t; all actions are expanded, and the policy network W is applied to each leaf state s_{t+2}, producing the logits for each root action.]

Figure 2. SoftTreeMax policy. Our exhaustive parallel TS expands all actions at each state up to depth d (= 2 here). The leaf state of every trajectory is used as input to the policy network. The output is then added to the trajectory's cumulative reward as described in (2). That is, instead of the standard softmax logits, we add the cumulative discounted reward to the policy network output. This policy is differentiable and can be easily integrated into any PG algorithm. In this work, we build on PPO and use its loss function to train the policy network.

5. SoftTreeMax: Deep Parallel Implementation

Following the success of deep RL (Mnih et al., 2015), deep neural networks are used nowadays almost exclusively in practice. Depending on the RL algorithm, a loss function is defined and gradients on the network weights can be calculated. In PG methods, the scoring function used in the softmax is commonly replaced by a neural network W_\theta:

\pi_\theta(a|s) \propto \exp(W_\theta(s,a)).

Similarly, we implement SoftTreeMax by replacing \theta(s) in (2) with a neural network W_\theta(s). Although both variants of SoftTreeMax from Section 3 involve computing an expectation, this can be hard in general. One approach is to handle it with sampling, though this introduces estimation variance into the process. We leave the question of sample-based theory and algorithmic implementations for future work. Instead, in finite-action-space environments such as Atari, we compute the exact expectation in SoftTreeMax with an exhaustive TS of depth d. Despite the exponential computational cost of spanning the entire tree, recent advancements in parallel GPU-based simulation allow efficient expansion of all nodes at the same depth simultaneously (Dalal et al., 2021; Rosenberg et al., 2022). This is possible when a simulator is implemented on GPU (Dalton et al., 2020; Makoviychuk et al., 2021; Freeman et al., 2021), or when a forward model is learned (Kim et al., 2020; Ha & Schmidhuber, 2018). To keep the complexity linear in the depth, we apply tree pruning to a limited width at all levels. We do so by sampling only the most promising actions at each level.

To summarize, in the practical SoftTreeMax algorithm we perform an exhaustive TS to obtain all trajectories up to depth d. We expand the tree by exhaustively expanding all actions, which corresponds to a uniform tree expansion policy \pi_b. We apply a neural network on the leaf states, and accumulate the result with the rewards along each trajectory to obtain the logits in (2). Finally, we aggregate the results using C-SoftTreeMax.
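Below is a schematic PyTorch sketch of this leaf-aggregation step: the leaves produced by the tree search are scored with W_\theta, combined with each trajectory's accumulated discounted reward, and averaged into per-action logits, so that gradients flow to W_\theta through every leaf. The module name, the tensor layout, and the interface that supplies the leaves are assumptions made for illustration; they are not the paper's released implementation.

```python
import torch
import torch.nn as nn

class CSoftTreeMaxHead(nn.Module):
    """Sketch: turn tree-search leaves into a differentiable action distribution."""

    def __init__(self, obs_dim: int, beta: float, gamma: float, depth: int):
        super().__init__()
        # W_theta: scores a leaf observation (shared across all leaves).
        self.w = nn.Sequential(nn.Linear(obs_dim, 256), nn.ReLU(), nn.Linear(256, 1))
        self.beta, self.gamma, self.depth = beta, gamma, depth

    def forward(self, leaf_obs, leaf_returns, leaf_probs):
        # leaf_obs:     (A, L, obs_dim) leaf observations reached under each root action
        # leaf_returns: (A, L)          discounted reward accumulated along each trajectory
        # leaf_probs:   (A, L)          trajectory probabilities under the expansion policy
        #                               (uniform rows when the tree is expanded exhaustively)
        leaf_scores = self.w(leaf_obs).squeeze(-1)                 # W_theta at every leaf: (A, L)
        per_leaf = leaf_returns + self.gamma ** self.depth * leaf_scores
        logits = self.beta * (leaf_probs * per_leaf).sum(dim=-1)   # expectation over leaves: (A,)
        return torch.log_softmax(logits, dim=-1)                   # log pi^C_{d,theta}(.|s)
```

In practice such a head would sit on top of a batched GPU tree search (e.g., Atari-CuLE), with one forward pass per environment step.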
We leave experiments +E-SoftTreeMax for future work on risk-averse RL. During +training, the gradient propagates to the NN weights of Wθ. +When the gradient ∇θ log πd,θ is calculated at each time +step, it updates Wθ for all leaf states, similarly to Siamese +networks (Bertinetto et al., 2016). An illustration of the +policy is given in Figure 2. +6. Experiments +We conduct our experiments on multiple games from the +Atari simulation suite (Bellemare et al., 2013). As a baseline, +we train a PPO (Schulman et al., 2017) agent with 256 +workers in parallel. In a hyperparameter search, we found +this number of workers to be the best in terms of run-time. +The environment engine is the highly efficient Atari-CuLE +(Dalton et al., 2020), a CUDA-based version of Atari that +runs on GPU. Similarly, we use Atari-CuLE for the GPU- +based breadth-first TS as done in (Dalal et al., 2021). We +then train SoftTreeMax for depths d = 1 . . . 8, with a single +worker. We use five seeds for each experiment. +For the implementation, we extend Stable-Baselines3 (Raf- +fin et al., 2019) with all parameters taken as default from the +original PPO paper (Schulman et al., 2017). We will release +the code upon publication. For depths d ≥ 3, we limited +the tree to a maximum width of 1024 nodes and pruned +non-promising trajectories in terms of estimated weights. +Since the distributed PPO baseline advances significantly +faster in terms of environment steps, for a fair comparison, +we ran all experiments for one week on the same machine +and use the wall-clock time as the x-axis. We use Intel(R) +Xeon(R) CPU E5-2698 v4 @ 2.20GHz equipped with one +NVIDIA Tesla V100 32GB. +In Figure 3, we plot the reward and variance of SoftTreeMax +for each game, as a function of depth. The dashed lines are +the results for PPO. Each value is taken after convergence, +i.e., the average over the last 20% of the run. The numbers +represent the average over five seeds per game. We choose +to exclude the standard deviation to avoid excessive clutter +in the plot. The plot conveys three intriguing conclusions. +First, in all cases, SoftTreeMax achieves significantly higher +reward than PPO. Its gradient variance is also orders of +magnitude lower than that of PPO. Second, the reward and +variance are negatively correlated – they mirror each other +in almost all of the games. This phenomenon demonstrates +how crucial it is to lower the variance of PG for improving +performance. And specifically, it highlights the benefits of +SoftTreeMax over “flat” PG. The third conclusion is that +each game has a different sweet-spot in terms of optimal TS +depth. Recall that we limit the run-time in all experiments +to one week. The deeper the TS, the slower each step and +less steps are finished by the end of the run. This type of +comparison also explains why there is no reason to expect +monotone variance reduction as a function of depth. +We also provide the training curves in Figure 4. For brevity, +we exclude a few of the depths from the plots. As seen, there +is a clear benefit for SoftTreeMax over distributed PPO with +the standard softmax policy. In most games, PPO with the +SoftTreeMax policy shows very high sample efficiency: it +achieves higher episodic reward even though it observes +much less episodes, for the same running time. +7. Related Work +Our work intersects several fields of the RL literature: +Softmax Operator. 
The softmax policy became a canonical +part of PG to the point where theoretical results of PG focus +specifically on it (Zhang et al., 2021; Mei et al., 2020b; Li +et al., 2021; Schulman et al., 2017; Haarnoja et al., 2018). +Even though we focus on a tree extension to the softmax +policy, the methodology we propose is general and can be +easily applied to other discrete or continuous parameterized +policies as in (Mei et al., 2020a; Miahi et al., 2021). +Tree Search. Planning with a TS is the process of using a +forward model to consider possible future trajectories and +decide on the best action at the root. One famous such algo- +rithm is Monte-Carlo TS (MCTS; Browne et al. 2012) used +in AlphaGo (Silver et al., 2016) and MuZero (Schrittwieser +et al., 2020). Other principal algorithms such as Value Itera- +tion, Policy Iteration and DQN were also shown to give an +improved performance with a tree search extensions (Efroni +et al., 2019; Dalal et al., 2021). +Risk Aversion. Many works considered an exponential +utility function for risk aversion (Chen et al., 2007; Garcıa +& Fern´andez, 2015; Fei et al., 2021). This utility function +is the same as E-SoftTreeMax formulation from (4), but we +have it directly in the policy instead of the objective. +Reward-free RL. We showed that the gradient variance is +minimized when the transitions induced by the behavior +policy πb are uniform. This is expressed by the second +eigenvalue of the transition matrix P πb. This notion of +uniform exploration is common to the reward-free RL setup +(Jin et al., 2020). Several such works considered the same +second eigenvalue in their analysis (Liu & Brunskill, 2018; +Tarbouriech & Lazaric, 2019). + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +Figure 3. Reward and Gradient variance: GPU SoftTreeMax (single worker) vs PPO (256 GPU workers). The blue reward plots +show the average of 50 evaluation episodes. The red variance plots show the average gradient variance of the corresponding training runs, +averaged over five seeds. The dashed lines represent the same for PPO. Note theat the variance y-axis is in log-scale. The reward and +variance are negatively correlated and mirror each other in almost all games. This demonstrates the necessity to lower the variance of PG +for improving performance. We limit the training run-time in all experiments to one week. The deeper the TS, the slower each step and +less steps are finished by the end of the training run. This explains the non-monotone performance and variance as a function of depth. +Figure 4. Training curves: GPU SoftTreeMax (single worker) vs PPO (256 GPU workers). The plots show average reward and +standard deviation over five seeds. The x-axis is the wall-clock time. The runs ended after a maximum of 200M time-steps, and after no +longer than one week. The standard PPO finished in less than one week. The training curves correspond to the evaluation runs in Figure 3. +8. Discussion +Planning in RL is typically carried out with value-based +algorithms due to its seamless integration with the Bellman +operator, leaving aside the popular class of PG methods. In +this work, we introduced for the first time a differentiable +parametric policy that combines TS with PG. We prove that +SoftTreeMax is essentially an exponential variance reduc- +tion technique and provide novel insight on how to choose +the expansion policy to minimize the gradient variance. 
It is an open question whether optimal variance reduction corresponds to the appealing regret properties tackled by UCT (Kocsis & Szepesvári, 2006).

Mitigating the known sample-inefficiency issue, SoftTreeMax achieves better performance than the widely used PPO with multiple workers and a softmax policy. Our method can be further applied to continuous control tasks, or to tasks where the forward model is learned with some estimation error. Other possible future directions are: (i) to study the implications of sampling trajectories instead of directly calculating their expectation; (ii) to analyze the convergence rate of SoftTreeMax; and (iii) to extend SoftTreeMax to adaptively changing depths to optimize run-time and performance.

[Figures 3 and 4 appear here; their captions are given above. Per-game panels: Asteroids, Breakout, Gopher, Krull, KungFuMaster, NameThisGame, Phoenix, VideoPinball, showing reward and gradient variance as a function of depth, and training curves of reward vs. wall-clock time.]

References

Agarwal, A., Kakade, S. M., Lee, J. D., and Mahajan, G. On the theory of policy gradient methods: Optimality, approximation, and distribution shift. J. Mach. Learn. Res., 22(98):1–76, 2021.

Bellemare, M. G., Naddaf, Y., Veness, J., and Bowling, M. The arcade learning environment: An evaluation platform for general agents. Journal of Artificial Intelligence Research, 47:253–279, 2013.

Bertinetto, L., Valmadre, J., Henriques, J. F., Vedaldi, A., and Torr, P. H. Fully-convolutional siamese networks for object tracking. In European Conference on Computer Vision, pp. 850–865. Springer, 2016.

Bhatnagar, S., Sutton, R. S., Ghavamzadeh, M., and Lee, M. Natural actor–critic algorithms. Automatica, 45(11):2471–2482, 2009.

Browne, C. B., Powley, E., Whitehouse, D., Lucas, S. M., Cowling, P. I., Rohlfshagen, P., Tavener, S., Perez, D., Samothrakis, S., and Colton, S. A survey of Monte Carlo tree search methods. IEEE Transactions on Computational Intelligence and AI in Games, 4(1):1–43, 2012.

Chatterjee, S. and Seneta, E. Towards consensus: Some convergence theorems on repeated averaging. Journal of Applied Probability, 14(1):89–97, 1977.

Chen, X., Sim, M., Simchi-Levi, D., and Sun, P. Risk aversion in inventory management. Operations Research, 55(5):828–842, 2007.

Coulom, R. Efficient selectivity and backup operators in Monte-Carlo tree search. In International Conference on Computers and Games, pp. 72–83. Springer, 2006.

Dalal, G., Hallak, A., Dalton, S., Mannor, S., Chechik, G., et al.
Improve agents without retraining: Parallel tree +search with off-policy correction. Advances in Neural +Information Processing Systems, 34:5518–5530, 2021. +Dalton, S. et al. +Accelerating reinforcement learning +through gpu atari emulation. Advances in Neural In- +formation Processing Systems, 33:19773–19782, 2020. +Efroni, Y., Dalal, G., Scherrer, B., and Mannor, S. How +to combine tree-search methods in reinforcement learn- +ing. In Proceedings of the AAAI Conference on Artificial +Intelligence, volume 33, pp. 3494–3501, 2019. +Fei, Y., Yang, Z., Chen, Y., and Wang, Z. Exponential +bellman equation and improved regret bounds for risk- +sensitive reinforcement learning. Advances in Neural +Information Processing Systems, 34:20436–20446, 2021. +Freeman, C. D., Frey, E., Raichuk, A., Girgin, S., Mordatch, +I., and Bachem, O. Brax-a differentiable physics en- +gine for large scale rigid body simulation. In Thirty-fifth +Conference on Neural Information Processing Systems +Datasets and Benchmarks Track (Round 1), 2021. +Garcıa, J. and Fern´andez, F. A comprehensive survey on safe +reinforcement learning. Journal of Machine Learning +Research, 16(1):1437–1480, 2015. +Greensmith, E., Bartlett, P. L., and Baxter, J. Variance reduc- +tion techniques for gradient estimates in reinforcement +learning. Journal of Machine Learning Research, 5(9), +2004. +Ha, D. and Schmidhuber, J. World models. arXiv preprint +arXiv:1803.10122, 2018. +Haarnoja, T., Zhou, A., Abbeel, P., and Levine, S. Soft +actor-critic: Off-policy maximum entropy deep reinforce- +ment learning with a stochastic actor. In International +conference on machine learning, pp. 1861–1870. PMLR, +2018. +Howard, R. A. and Matheson, J. E. Risk-sensitive markov +decision processes. Management science, 18(7):356–369, +1972. +Jin, C., Krishnamurthy, A., Simchowitz, M., and Yu, T. +Reward-free exploration for reinforcement learning. In +International Conference on Machine Learning, pp. 4870– +4879. PMLR, 2020. +Kim, S. W., Zhou, Y., Philion, J., Torralba, A., and Fi- +dler, S. Learning to simulate dynamic environments with +gamegan. In Proceedings of the IEEE/CVF Conference +on Computer Vision and Pattern Recognition, pp. 1231– +1240, 2020. +Kocsis, L. and Szepesv´ari, C. Bandit based monte-carlo +planning. In European conference on machine learning, +pp. 282–293. Springer, 2006. +Levin, D. A. and Peres, Y. Markov chains and mixing times, +volume 107. American Mathematical Soc., 2017. +Li, G., Wei, Y., Chi, Y., Gu, Y., and Chen, Y. Softmax +policy gradient methods can take exponential time to +converge. In Conference on Learning Theory, pp. 3107– +3110. PMLR, 2021. +Liu, Y. and Brunskill, E. When simple exploration is sample +efficient: Identifying sufficient conditions for random +exploration to yield pac rl algorithms. arXiv preprint +arXiv:1805.09045, 2018. +Liu, Y., Zhang, K., Basar, T., and Yin, W. An improved +analysis of (variance-reduced) policy gradient and natural + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +policy gradient methods. Advances in Neural Information +Processing Systems, 33:7624–7636, 2020. +Makoviychuk, V., Wawrzyniak, L., Guo, Y., Lu, M., Storey, +K., Macklin, M., Hoeller, D., Rudin, N., Allshire, A., +Handa, A., et al. Isaac gym: High performance gpu-based +physics simulation for robot learning. arXiv preprint +arXiv:2108.10470, 2021. +Mathkar, A. S. and Borkar, V. S. Nonlinear gossip. SIAM +Journal on Control and Optimization, 54(3):1535–1557, +2016. 
+Mei, J., Xiao, C., Dai, B., Li, L., Szepesv´ari, C., and Schu- +urmans, D. Escaping the gravitational pull of softmax. +Advances in Neural Information Processing Systems, 33: +21130–21140, 2020a. +Mei, J., Xiao, C., Szepesvari, C., and Schuurmans, D. On +the global convergence rates of softmax policy gradient +methods. In International Conference on Machine Learn- +ing, pp. 6820–6829. PMLR, 2020b. +Miahi, E., MacQueen, R., Ayoub, A., Masoumzadeh, A., +and White, M. Resmax: An alternative soft-greedy oper- +ator for reinforcement learning. 2021. +Mnih, V., Kavukcuoglu, K., Silver, D., Rusu, A. A., Veness, +J., Bellemare, M. G., Graves, A., Riedmiller, M., Fidje- +land, A. K., Ostrovski, G., et al. Human-level control +through deep reinforcement learning. nature, 518(7540): +529–533, 2015. +Noorani, E. and Baras, J. S. Risk-sensitive reinforce: A +monte carlo policy gradient algorithm for exponential +performance criteria. In 2021 60th IEEE Conference +on Decision and Control (CDC), pp. 1522–1527. IEEE, +2021. +Papini, M., Binaghi, D., Canonaco, G., Pirotta, M., and +Restelli, M. Stochastic variance-reduced policy gradi- +ent. In International conference on machine learning, pp. +4026–4035. PMLR, 2018. +Pelletier, M. On the almost sure asymptotic behaviour of +stochastic algorithms. Stochastic processes and their +applications, 78(2):217–244, 1998. +Pham, N., Nguyen, L., Phan, D., Nguyen, P. H., Dijk, M., +and Tran-Dinh, Q. A hybrid stochastic policy gradient +algorithm for reinforcement learning. In International +Conference on Artificial Intelligence and Statistics, pp. +374–385. PMLR, 2020. +Puterman, M. L. +Markov decision processes: discrete +stochastic dynamic programming. John Wiley & Sons, +2014. +Raffin, A., Hill, A., Ernestus, M., Gleave, A., Kanervisto, +A., and Dormann, N. Stable baselines3, 2019. +Rosenberg, A., Hallak, A., Mannor, S., Chechik, G., and +Dalal, G. Planning and learning with adaptive lookahead. +arXiv preprint arXiv:2201.12403, 2022. +Schrittwieser, J., Antonoglou, I., Hubert, T., Simonyan, K., +Sifre, L., Schmitt, S., Guez, A., Lockhart, E., Hassabis, +D., Graepel, T., et al. Mastering atari, go, chess and shogi +by planning with a learned model. Nature, 588(7839): +604–609, 2020. +Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and +Klimov, O. Proximal policy optimization algorithms. +arXiv preprint arXiv:1707.06347, 2017. +Shen, Z., Ribeiro, A., Hassani, H., Qian, H., and Mi, C. Hes- +sian aided policy gradient. In International conference +on machine learning, pp. 5729–5738. PMLR, 2019. +Silver, D. Reinforcement learning and simulation-based +search in computer go. 2009. +Silver, D., Huang, A., Maddison, C. J., Guez, A., Sifre, L., +Van Den Driessche, G., Schrittwieser, J., Antonoglou, I., +Panneershelvam, V., Lanctot, M., et al. Mastering the +game of go with deep neural networks and tree search. +nature, 529(7587):484–489, 2016. +Silver, D., Hubert, T., Schrittwieser, J., Antonoglou, I., Lai, +M., Guez, A., Lanctot, M., Sifre, L., Kumaran, D., Grae- +pel, T., et al. A general reinforcement learning algorithm +that masters chess, shogi, and go through self-play. Sci- +ence, 362(6419):1140–1144, 2018. +Sutton, R. S., McAllester, D., Singh, S., and Mansour, Y. +Policy gradient methods for reinforcement learning with +function approximation. Advances in neural information +processing systems, 12, 1999. +Szepesv´ari, C. Algorithms for reinforcement learning. Syn- +thesis lectures on artificial intelligence and machine +learning, 4(1):1–103, 2010. 
+Tarbouriech, J. and Lazaric, A. Active exploration in markov +decision processes. In The 22nd International Confer- +ence on Artificial Intelligence and Statistics, pp. 974–982. +PMLR, 2019. +Thomas, P. S. and Brunskill, E. +Policy gradient meth- +ods for reinforcement learning with function approxi- +mation and action-dependent baselines. arXiv preprint +arXiv:1706.06643, 2017. +Weaver, L. and Tao, N. The optimal reward baseline for +gradient-based reinforcement learning. In Proceedings +of the Seventeenth conference on Uncertainty in artificial +intelligence, pp. 538–545, 2001. + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +Wu, C., Rajeswaran, A., Duan, Y., Kumar, V., Bayen, A. M., +Kakade, S., Mordatch, I., and Abbeel, P. Variance reduc- +tion for policy gradient with action-dependent factorized +baselines. In International Conference on Learning Rep- +resentations, 2018. +Xu, P., Gao, F., and Gu, Q. An improved convergence +analysis of stochastic variance-reduced policy gradient. +In Uncertainty in Artificial Intelligence, pp. 541–551. +PMLR, 2020. +Ye, W., Liu, S., Kurutach, T., Abbeel, P., and Gao, Y. Mas- +tering atari games with limited data. Advances in Neural +Information Processing Systems, 34:25476–25488, 2021. +Zhang, J., Ni, C., Szepesvari, C., Wang, M., et al. On the +convergence and sample efficiency of variance-reduced +policy gradient method. Advances in Neural Information +Processing Systems, 34:2228–2240, 2021. + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +A. Appendix +A.1. Proofs +A.1.1. PROOF OF LEMMA 4.1 – BOUND ON THE POLICY GRADIENT VARIANCE +For any parametric policy πθ and function Q : S × A → R, +Var (∇θ log πθ(a|s)Q(s, a)) ≤ max +s,a [Q(s, a)]2 max +s +||∇θ log πθ(·|s)||2 +F , +where ∇θ log πθ(·|s) ∈ RA×dim(θ) is a matrix whose a-th row is ∇θ log πθ(a|s)⊤. +Proof. The variance for a parametric policy πθ is given as follows: +Var (∇θ log πθ(a|s)Q(a, s)) =Es∼µπθ ,a∼πθ(·|s) +� +∇θ log πθ(a|s)⊤∇θ log πθ(a|s)Q(s, a)2� +− +Es∼ρπθ ,a∼πθ(·|s) [∇θ log πθ(a|s)Q(s, a)]⊤ Es∼µπθ ,a∼πθ(·|s) [∇θ log πθ(a|s)Q(s, a)] , +where Q(s, a) is the currently estimated Q-function and µπθ is the stationary distribution induced by following the policy +πθ. Since the second term we subtract is always positive (it is of quadratic form v⊤v) we can bound the variance by the first +term: +Var (∇θ log πθ(a|s)Q(a, s)) ≤Es∼µπθ ,a∼πθ(·|s) +� +∇θ log πθ(a|s)⊤∇θ log πθ(a|s)Q(s, a)2� += +� +s +µπθ(s) +� +a +πθ(a|s)∇θ log πθ(a|s)⊤∇θ log πθ(a|s)Q(s, a)2 +≤ max +s,a +� +[Q(s, a)]2 πθ(a|s) +� � +s +µπθ(s) +� +a +∇θ log πθ(a|s)⊤∇θ log πθ(a|s) +≤ max +s,a [Q(s, a)]2 max +s +� +a +∇θ log πθ(a|s)⊤∇θ log πθ(a|s) += max +s,a [Q(s, a)]2 max +s +||∇θ log πθ(·|s)||2 +F . +A.1.2. PROOF OF LEMMA 4.2 – VECTOR FORM OF C-SOFTTREEMAX +In vector form, (3) is given by +πC +d,θ(·|s) = +exp +� +β +� +Cs,d + γdPs (P πb)d−1 Θ +�� +1⊤ +A exp +� +β +� +Cs,d + γdPs (P πb)d−1 Θ +��, +(10) +where +Cs,d = Rs + Ps +�d−1 +� +h=1 +γh (P πb)h−1 +� +Rπb. +(11) +Proof. Consider the vector ℓs,· ∈ R|A|. Its expectation satisfies +Eπbℓs,·(d; θ) = Eπb +�d−1 +� +t=0 +γtrt + γdθ(sd) +� += Rs + +d−1 +� +t=1 +γtPs(P πb)t−1Rπb + γdPs(P πb)d−1Θ. +As required. + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +A.1.3. 
PROOF OF LEMMA 4.3 – GRADIENT OF C-SOFTTREEMAX +The C-SoftTreeMax gradient of dimension A × S is given by +∇θ log πC +d,θ = βγd � +IA − 1A(πC +d,θ)⊤� +Ps (P πb)d−1 , +where for brevity, we drop the s index in the policy above, i.e., πC +d,θ ≡ πC +d,θ(·|s). +Proof. The (j, k)-th entry of ∇θ log πC +d,θ satisifes +[∇θ log πC +d,θ]j,k = +∂ log(πC +d,θ(aj|s)) +∂θ(sk) += βγd[Ps(P πb)d−1]j,k − +� +a +� +exp +� +β +� +Cs,d + γdPs (P πb)d−1 Θ +��� +a βγd � +Ps(P πb)d−1� +a,k +1⊤ +A exp +� +β +� +Cs,d + γdPs (P πb)d−1 Θ +�� += βγd[Ps(P πb)d−1]j,k − βγd � +a +πC +d,θ(a|s) +� +Ps(P πb)d−1� +a,k += βγd[Ps(P πb)d−1]j,k − βγd � +(πC +d,θ)⊤Ps(P πb)d−1� +k += βγd[Ps(P πb)d−1]j,k − βγd � +1A(πC +d,θ)⊤Ps(P πb)d−1� +j,k . +Now, moving back to matrix form, we obtain the lemma. +A.1.4. PROOF OF THEOREM 4.4 – EXPONENTIAL VARIANCE DECAY OF C-SOFTTREEMAX +The C-SoftTreeMax policy gradient is bounded by +Var +� +∇θ log πC +d,θ(a|s)Q(s, a) +� +≤ 2 A2S2β2 +(1 − γ)2 γ2d|λ2(P πb)|2(d−1). +Proof. We use Lemma 4.1 directly. First of all, it is know that when the reward is bounded in [0, 1], the maximal value of +the Q-function is +1 +1−γ as the sum as infinite discounted rewards. Next, we bound the Frobenius norm of the term achieved in +Lemma 4.3, by applying the eigen-decomposition on P πb: +P πb = 1Sµ⊤ + +S +� +i=2 +λiuiv⊤ +i , +(12) +where µ is the stationary distribution of P πb, and ui and vi are left and right eigenvectors correspondingly. +||βγd � +IA,A − 1Aπ⊤� +Ps(P πb)d−1||F = βγd|| +� +IA,A − 1Aπ⊤� +Ps +� +1Sµ⊤ + +S +� +i=2 +λd−1 +i +uiv⊤ +i +� +||F +(Ps is stochastic) += βγd|| +� +IA,A − 1Aπ⊤� +� +1Aµ⊤ + +S +� +i=2 +λd−1 +i +Psuiv⊤ +i +� +||F +(projection nullifies 1Aµ⊤) += βγd|| +� +IA,A − 1Aπ⊤� +� S +� +i=2 +λd−1 +i +Psuiv⊤ +i +� +||F +(triangle inequality) +≤ βγd +S +� +i=2 +|| +� +IA,A − 1Aπ⊤� � +λd−1 +i +Psuiv⊤ +i +� +||F +(matrix norm sub-multiplicativity) +≤ βγd|λd−1 +2 +| +S +� +i=2 +||IA,A − 1Aπ⊤||F ||Ps||F ||uiv⊤ +i ||F += βγd|λd−1 +2 +|(S − 1)||IA,A − 1Aπ⊤||F ||Ps||F + +SoftTreeMax: Exponential Variance Reduction in Policy Gradient via Tree Search +Now we can bound the norm ||IA,A − 1Aπ⊤||F by direct calculation: +||IA,A − 1Aπ⊤||2 +F = Tr +�� +IA,A − 1Aπ⊤� � +IA,A − 1Aπ⊤�⊤� +(13) += Tr +� +IA,A − 1Aπ⊤ − π1⊤ +A + π⊤π1A1⊤ +A +� +(14) += A − 1 − 1 + Aπ⊤π +(15) +≤ 2A +(16) +And from the Cauchy-Schwartz inequality: +||Ps||2 +F = +� +a +� +s +[[Ps]a,s]2 = +� +a +||[Ps]a,·||2 +2 ≤ +� +a +||[Ps]a,·||1||[Ps]a,·||∞ ≤ A. +So: +Var +� +∇θ log πC +d,θ(a|s)Q(s, a) +� +≤ max +s,a [Q(s, a)]2 max +s +||∇θ log πC +d,θ(·|s)||2 +F +≤ +1 +(1 − γ)2 ||βγd � +IA,A − 1Aπ⊤� +Ps(P πb)d−1||2 +F +≤ +1 +(1 − γ)2 β2γ2d|λ2(P πb)|2(d−1)S2(2A2) +Which obtains the desired bound. +A.1.5. A LOWER BOUND ON C-SOFTTREEMAX GRADIENT (RESULT NOT IN THE PAPER) +For completeness we also supply a lower bound on the Frobenius norm of the gradient. Note that this result does not +translate to the a lower bound on the variance since we have no lower bound equivalence of Lemma 4.1. +Lemma A.1. The Frobenius norm on the gradient of the policy is lower-bounded by: +||∇θ log πC +d,θ(·|s)||F ≥ C · βγd|λ2(P πb)|(d−1). +(17) +Proof. 
A.1.5. A LOWER BOUND ON THE C-SOFTTREEMAX GRADIENT (RESULT NOT IN THE MAIN PAPER)

For completeness, we also supply a lower bound on the Frobenius norm of the gradient. Note that this result does not translate into a lower bound on the variance, since we have no lower-bound counterpart of Lemma 4.1.

Lemma A.1. The Frobenius norm of the policy gradient is lower bounded by
$$\left\|\nabla_\theta\log\pi^C_{d,\theta}(\cdot|s)\right\|_F \ge C\cdot\beta\gamma^d\left|\lambda_2(P^{\pi_b})\right|^{d-1}. \qquad (17)$$

Proof. Since the Frobenius norm dominates the induced $\ell_2$ norm,
$$\left\|\beta\gamma^d\left(I_A-\mathbf{1}_A\pi^\top\right)P_s(P^{\pi_b})^{d-1}\right\|_F \ge \left\|\beta\gamma^d\left(I_A-\mathbf{1}_A\pi^\top\right)P_s(P^{\pi_b})^{d-1}\right\|_2.$$
Taking $u$ to be a unit-norm right eigenvector associated with the second eigenvalue of $P^{\pi_b}$,
$$\begin{aligned}
\left\|\beta\gamma^d\left(I_A-\mathbf{1}_A\pi^\top\right)P_s(P^{\pi_b})^{d-1}\right\|_2 &\ge \left\|\beta\gamma^d\left(I_A-\mathbf{1}_A\pi^\top\right)P_s(P^{\pi_b})^{d-1}u\right\|_2\\
&= \beta\gamma^d\left|\lambda_2(P^{\pi_b})\right|^{d-1}\left\|\left(I_A-\mathbf{1}_A\pi^\top\right)P_su\right\|_2.
\end{aligned}$$
Note that although $P_su$ can be $0$, that is not the common case, since we can freely change $\pi_b$ (and therefore the eigenvectors of $P^{\pi_b}$).

A.1.6. PROOF OF LEMMA 4.5 – VECTOR FORM OF E-SOFTTREEMAX

For $d\ge 1$, (4) is given by
$$\pi^E_{d,\theta}(\cdot|s) = \frac{E_{s,d}\exp(\beta\gamma^d\Theta)}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}, \qquad (18)$$
where
$$E_{s,d} = P_s\prod_{h=1}^{d-1}\left(D\!\left(\exp[\beta\gamma^h R]\right)P^{\pi_b}\right), \qquad (19)$$
with $R$ being the $|S|$-dimensional vector whose $s$-th coordinate is $r(s)$.

Proof. Recall that
$$\ell_{s,a}(d;\theta) = r(s) + \sum_{t=1}^{d-1}\gamma^t r(s_t) + \gamma^d\theta(s_d), \qquad (20)$$
and hence
$$\exp[\beta\ell_{s,a}(d;\theta)] = \exp\left[\beta\left(r(s)+\sum_{t=1}^{d-1}\gamma^t r(s_t)+\gamma^d\theta(s_d)\right)\right]. \qquad (21)$$
Therefore,
$$\begin{aligned}
\mathbb{E}\left[\exp\beta\ell_{s,a}(d;\theta)\right] &= \mathbb{E}\left[\exp\left(\beta\left(r(s)+\sum_{t=1}^{d-1}\gamma^t r(s_t)\right)\right)\mathbb{E}\left[\exp\left(\beta\gamma^d\theta(s_d)\right)\,\middle|\,s_1,\dots,s_{d-1}\right]\right] \qquad (22)\\
&= \mathbb{E}\left[\exp\left(\beta\left(r(s)+\sum_{t=1}^{d-1}\gamma^t r(s_t)\right)\right)P^{\pi_b}(\cdot|s_{d-1})\right]\exp(\beta\gamma^d\Theta) \qquad (23)\\
&= \mathbb{E}\left[\exp\left(\beta\left(r(s)+\sum_{t=1}^{d-2}\gamma^t r(s_t)\right)\right)\exp\left[\beta\gamma^{d-1}r(s_{d-1})\right]P^{\pi_b}(\cdot|s_{d-1})\right]\exp(\beta\gamma^d\Theta). \qquad (24)
\end{aligned}$$
By repeatedly applying iterated conditioning as above, the desired result follows. Note that $\exp(\beta r(s))$ does not depend on the action and therefore cancels with the denominator.

A.1.7. PROOF OF LEMMA 4.6 – GRADIENT OF E-SOFTTREEMAX

The E-SoftTreeMax gradient of dimension $A\times S$ is given by
$$\nabla_\theta\log\pi^E_{d,\theta} = \beta\gamma^d\left(I_A-\mathbf{1}_A(\pi^E_{d,\theta})^\top\right)D\!\left(\pi^E_{d,\theta}\right)^{-1}\frac{E_{s,d}\,D(\exp(\beta\gamma^d\Theta))}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)},$$
where for brevity we drop the $s$ index in the policy, i.e., $\pi^E_{d,\theta}\equiv\pi^E_{d,\theta}(\cdot|s)$.

Proof. The $(j,k)$-th entry of $\nabla_\theta\log\pi^E_{d,\theta}$ satisfies
$$\begin{aligned}
[\nabla_\theta\log\pi^E_{d,\theta}]_{j,k} &= \frac{\partial\log\pi^E_{d,\theta}(a_j|s)}{\partial\theta(s_k)}\\
&= \frac{\partial}{\partial\theta(s_k)}\left(\log\left[(E_{s,d})_j^\top\exp(\beta\gamma^d\Theta)\right]-\log\left[\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)\right]\right)\\
&= \frac{\beta\gamma^d(E_{s,d})_{j,k}\exp(\beta\gamma^d\theta(s_k))}{(E_{s,d})_j^\top\exp(\beta\gamma^d\Theta)} - \frac{\beta\gamma^d\mathbf{1}_A^\top E_{s,d}e_k\exp(\beta\gamma^d\theta(s_k))}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\\
&= \frac{\beta\gamma^d\left(E_{s,d}e_k\exp(\beta\gamma^d\theta(s_k))\right)_j}{(E_{s,d})_j^\top\exp(\beta\gamma^d\Theta)} - \frac{\beta\gamma^d\mathbf{1}_A^\top E_{s,d}e_k\exp(\beta\gamma^d\theta(s_k))}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\\
&= \beta\gamma^d\left(\frac{e_j^\top}{e_j^\top E_{s,d}\exp(\beta\gamma^d\Theta)} - \frac{\mathbf{1}_A^\top}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\right)E_{s,d}e_k\exp(\beta\gamma^d\theta(s_k)).
\end{aligned}$$
Hence,
$$[\nabla_\theta\log\pi^E_{d,\theta}]_{\cdot,k} = \beta\gamma^d\left(D\!\left(E_{s,d}\exp(\beta\gamma^d\Theta)\right)^{-1} - \left(\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)\right)^{-1}\mathbf{1}_A\mathbf{1}_A^\top\right)E_{s,d}e_k\exp(\beta\gamma^d\theta(s_k)).$$
It follows that
$$\nabla_\theta\log\pi^E_{d,\theta} = \beta\gamma^d\left(D\!\left(\pi^E_{d,\theta}\right)^{-1}-\mathbf{1}_A\mathbf{1}_A^\top\right)\frac{E_{s,d}\,D(\exp(\beta\gamma^d\Theta))}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}, \qquad (25)$$
from which the desired result is immediate.
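Lemmas 4.5 and 4.6 can be checked against finite differences on a small example. The NumPy sketch below forms $E_{s,d}$ as in (19), evaluates (18), and compares the closed-form gradient of Lemma 4.6 with a central-difference approximation of $\nabla_\theta\log\pi^E_{d,\theta}$; the MDP sizes, seed, and variable names are arbitrary choices of ours.

import numpy as np
from functools import reduce

rng = np.random.default_rng(2)
S, A, d, gamma, beta = 8, 3, 4, 0.95, 0.7
P = rng.dirichlet(np.ones(S), size=(A, S))   # P[a, s, :] = Pr(s' | s, a)
R = rng.random(S)                            # state rewards r(s)
pi_b = rng.dirichlet(np.ones(A), size=S)     # tree-expansion policy
P_pib = np.einsum('sa,asz->sz', pi_b, P)     # S x S chain under pi_b
theta = rng.standard_normal(S)               # leaf logits Theta
s = 0

def E_sd():
    # E_{s,d} = P_s * prod_{h=1}^{d-1} D(exp(beta gamma^h R)) P^{pi_b}   (eq. 19)
    mats = [np.diag(np.exp(beta * gamma**h * R)) @ P_pib for h in range(1, d)]
    return P[:, s, :] @ reduce(np.matmul, mats, np.eye(S))

def log_pi(th):
    w = E_sd() @ np.exp(beta * gamma**d * th)   # numerator of eq. (18)
    return np.log(w) - np.log(w.sum())

E = E_sd()
x = np.exp(beta * gamma**d * theta)
pi = E @ x / (E @ x).sum()
# Closed-form gradient from Lemma 4.6.
grad = beta * gamma**d * (np.eye(A) - np.outer(np.ones(A), pi)) @ np.diag(1.0 / pi) \
       @ (E @ np.diag(x)) / (E @ x).sum()

# Central finite differences of log pi^E with respect to theta, one coordinate at a time.
eps, fd = 1e-6, np.zeros((A, S))
for k in range(S):
    e = np.zeros(S)
    e[k] = eps
    fd[:, k] = (log_pi(theta + e) - log_pi(theta - e)) / (2 * eps)

print("max abs deviation:", np.max(np.abs(grad - fd)))   # expected to be on the order of finite-difference error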
A.1.8. PROOF OF THEOREM 4.7 – EXPONENTIAL VARIANCE DECAY OF E-SOFTTREEMAX

There exists $\alpha\in(0,1)$ such that, for any function $Q:\mathcal{S}\times\mathcal{A}\to\mathbb{R}$,
$$\operatorname{Var}\left(\nabla_\theta\log\pi^E_{d,\theta}(a|s)Q(s,a)\right)\in O\!\left(\beta^2\gamma^{2d}\alpha^{2d}\right).$$
If all rewards are equal ($r\equiv\mathrm{const}$), then $\alpha=\left|\lambda_2(P^{\pi_b})\right|$.

Proof. Let $d\ge 2$. Recall that
$$E_{s,d} = P_s\prod_{h=1}^{d-1}\left(D\!\left(\exp[\beta\gamma^h R]\right)P^{\pi_b}\right), \qquad (26)$$
and that $R$ denotes the $S$-dimensional vector whose $s$-th coordinate is $r(s)$. Define
$$B_i = \begin{cases} P^{\pi_b} & \text{if } i=d-1,\\ D^{-1}\!\left(P^{\pi_b}M_{i+1}\right)P^{\pi_b}D(M_{i+1}) & \text{if } i=1,\dots,d-2,\end{cases} \qquad (27)$$
and the vector
$$M_i = \begin{cases} \exp(\beta\gamma^{d-1}R) & \text{if } i=d-1,\\ \exp(\beta\gamma^i R)\circ P^{\pi_b}M_{i+1} & \text{if } i=1,\dots,d-2,\end{cases} \qquad (28)$$
where $\circ$ denotes the element-wise product. Then,
$$E_{s,d} = P_sD(M_1)\prod_{i=1}^{d-1}B_i. \qquad (29)$$
It is easy to see that each $B_i$ is a row-stochastic matrix, i.e., all its entries are non-negative and $B_i\mathbf{1}_S=\mathbf{1}_S$.

Next, we show that all non-zero entries of $B_i$ are bounded away from $0$ by a constant; this is needed in order to apply the result of (Chatterjee & Seneta, 1977) below. The $j$-th coordinate of $M_i$ satisfies
$$(M_i)_j = \exp[\beta\gamma^i R_j]\sum_k[P^{\pi_b}]_{j,k}(M_{i+1})_k \le \left\|\exp[\beta\gamma^i R]\right\|_\infty\left\|M_{i+1}\right\|_\infty. \qquad (30)$$
Separately, observe that $\|M_{d-1}\|_\infty\le\|\exp(\beta\gamma^{d-1}R)\|_\infty$. Plugging these relations into (28) gives
$$\|M_1\|_\infty \le \prod_{h=1}^{d-1}\left\|\exp[\beta\gamma^h R]\right\|_\infty = \prod_{h=1}^{d-1}\left\|\exp[\beta R]\right\|_\infty^{\gamma^h} = \left\|\exp[\beta R]\right\|_\infty^{\sum_{h=1}^{d-1}\gamma^h} \le \left\|\exp[\beta R]\right\|_\infty^{\frac{1}{1-\gamma}}. \qquad (31)$$
Similarly, for every $1\le i\le d-1$,
$$\|M_i\|_\infty \le \prod_{h=i}^{d-1}\left\|\exp[\beta R]\right\|_\infty^{\gamma^h} \le \left\|\exp[\beta R]\right\|_\infty^{\frac{1}{1-\gamma}}. \qquad (32)$$
The $jk$-th entry of $B_i=D^{-1}(P^{\pi_b}M_{i+1})P^{\pi_b}D(M_{i+1})$ is
$$(B_i)_{jk} = \frac{P^{\pi_b}_{jk}[M_{i+1}]_k}{\sum_{\ell=1}^{|S|}P^{\pi_b}_{j\ell}[M_{i+1}]_\ell} \ge \frac{P^{\pi_b}_{jk}}{\sum_{\ell=1}^{|S|}P^{\pi_b}_{j\ell}[M_{i+1}]_\ell} \ge \frac{P^{\pi_b}_{jk}}{\left\|\exp[\beta R]\right\|_\infty^{\frac{1}{1-\gamma}}}, \qquad (33)$$
where the first inequality uses that the rewards are non-negative, so every entry of $M_{i+1}$ is at least $1$. Hence, every non-zero entry of $B_i$ is bounded away from zero by a constant that does not depend on $d$, and we can proceed with applying the following result.

By (Chatterjee & Seneta, 1977, Theorem 5) (see also Eq. (14) in (Mathkar & Borkar, 2016)), $\lim_{d\to\infty}\prod_{i=1}^{d-1}B_i$ exists and is of the form $\mathbf{1}_S\mu^\top$ for some probability vector $\mu$. Furthermore, there exists $\alpha\in(0,1)$ such that $\varepsilon(d):=\left(\prod_{i=1}^{d-1}B_i\right)-\mathbf{1}_S\mu^\top$ satisfies
$$\|\varepsilon(d)\| = O(\alpha^d). \qquad (34)$$
Pick linearly independent vectors $w_2,\dots,w_S$ such that
$$\mu^\top w_i = 0 \quad\text{for } i=2,\dots,S. \qquad (35)$$
Since $\sum_{i=2}^S\alpha_iw_i$ is perpendicular to $\mu$ for any $\alpha_2,\dots,\alpha_S$, and because $\mu^\top\exp(\beta\gamma^d\Theta)>0$, there exists no choice of $\alpha_2,\dots,\alpha_S$ such that $\sum_{i=2}^S\alpha_iw_i=\exp(\beta\gamma^d\Theta)$. Hence, if we let $z_1=\mathbf{1}_S$ and $z_i=D(\exp(\beta\gamma^d\Theta))^{-1}w_i$ for $i=2,\dots,S$, then $\{z_1,\dots,z_S\}$ is linearly independent. In particular, $\{z_1,\dots,z_S\}$ spans $\mathbb{R}^S$.

Now consider an arbitrary vector $z:=\sum_{i=1}^Sc_iz_i\in\mathbb{R}^S$ with $\|z\|_2=1$. Then,
$$\begin{aligned}
\nabla_\theta\log\pi^E_{d,\theta}\,z &= \nabla_\theta\log\pi^E_{d,\theta}\sum_{i=2}^Sc_iz_i \qquad (36)\\
&= \beta\gamma^d\left(I_A-\mathbf{1}_A(\pi^E_{d,\theta})^\top\right)D\!\left(\pi^E_{d,\theta}\right)^{-1}\frac{E_{s,d}\,D(\exp(\beta\gamma^d\Theta))}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\sum_{i=2}^Sc_iz_i \qquad (37)\\
&= \beta\gamma^d\left(I_A-\mathbf{1}_A(\pi^E_{d,\theta})^\top\right)D\!\left(\pi^E_{d,\theta}\right)^{-1}\frac{E_{s,d}}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\sum_{i=2}^Sc_iw_i \qquad (38)\\
&= \beta\gamma^d\left(I_A-\mathbf{1}_A(\pi^E_{d,\theta})^\top\right)D\!\left(\pi^E_{d,\theta}\right)^{-1}\frac{\mathbf{1}_S\mu^\top+\varepsilon(d)}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\sum_{i=2}^Sc_iw_i \qquad (39)\\
&= \beta\gamma^d\left(I_A-\mathbf{1}_A(\pi^E_{d,\theta})^\top\right)D\!\left(\pi^E_{d,\theta}\right)^{-1}\frac{\varepsilon(d)}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\sum_{i=2}^Sc_iw_i \qquad (40)\\
&= \beta\gamma^d\left(I_A-\mathbf{1}_A(\pi^E_{d,\theta})^\top\right)D\!\left(\pi^E_{d,\theta}\right)^{-1}\frac{\varepsilon(d)\,D(\exp(\beta\gamma^d\Theta))}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\left(z-c_1\mathbf{1}_S\right), \qquad (41)
\end{aligned}$$
where (36) follows from the fact that $\nabla_\theta\log\pi^E_{d,\theta}z_1=\nabla_\theta\log\pi^E_{d,\theta}\mathbf{1}_S=0$, (37) follows from Lemma 4.6, (38) holds since $z_i=D(\exp(\beta\gamma^d\Theta))^{-1}w_i$, (40) holds because $\mu$ is perpendicular to $w_i$ for each $i$, and (41) follows by reusing the relation $z_i=D(\exp(\beta\gamma^d\Theta))^{-1}w_i$ together with the fact that $z_1=\mathbf{1}_S$.
From (41), it follows that
$$\begin{aligned}
\left\|\nabla_\theta\log\pi^E_{d,\theta}\,z\right\| &\le \beta\gamma^d\|\varepsilon(d)\|\left\|\frac{\left(I_A-\mathbf{1}_A(\pi^E_{d,\theta})^\top\right)D\!\left(\pi^E_{d,\theta}\right)^{-1}}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\right\|\left\|D(\exp(\beta\gamma^d\Theta))\right\|\left\|z-c_1\mathbf{1}_S\right\| \qquad (42)\\
&\le \beta\gamma^d\alpha^d\left(\|I_A\|+\|\mathbf{1}_A(\pi^E_{d,\theta})^\top\|\right)\left\|\frac{D\!\left(\pi^E_{d,\theta}\right)^{-1}}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\right\|\exp\!\left(\beta\gamma^d\max_s\theta(s)\right)\left\|z-c_1\mathbf{1}_S\right\| \qquad (43)\\
&\le \beta\gamma^d\alpha^d\left(1+\sqrt{A}\right)\left\|\frac{D\!\left(\pi^E_{d,\theta}\right)^{-1}}{\mathbf{1}_A^\top E_{s,d}\exp(\beta\gamma^d\Theta)}\right\|\exp\!\left(\beta\gamma^d\max_s\theta(s)\right)\left\|z-c_1\mathbf{1}_S\right\| \qquad (44)\\
&\le \beta\gamma^d\alpha^d\left(1+\sqrt{A}\right)\left\|D^{-1}\!\left(E_{s,d}\exp(\beta\gamma^d\Theta)\right)\right\|\exp\!\left(\beta\gamma^d\max_s\theta(s)\right)\left\|z-c_1\mathbf{1}_S\right\| \qquad (45)\\
&\le \beta\gamma^d\alpha^d\left(1+\sqrt{A}\right)\frac{1}{\min_a\left[E_{s,d}\exp(\beta\gamma^d\Theta)\right]_a}\exp\!\left(\beta\gamma^d\max_s\theta(s)\right)\left\|z-c_1\mathbf{1}_S\right\| \qquad (46)\\
&\le \beta\gamma^d\alpha^d\left(1+\sqrt{A}\right)\frac{\exp\!\left(\beta\gamma^d\max_s\theta(s)\right)}{\exp\!\left(\beta\gamma^d\min_s\theta(s)\right)\min_s[M_1]_s}\left\|z-c_1\mathbf{1}_S\right\| \qquad (47)\\
&\le \beta\gamma^d\alpha^d\left(1+\sqrt{A}\right)\frac{\exp\!\left(\beta\gamma^d\max_s\theta(s)\right)}{\exp\!\left(\beta\gamma^d\min_s\theta(s)\right)\exp\!\left(\beta\min_s r(s)\right)}\left\|z-c_1\mathbf{1}_S\right\| \qquad (48)\\
&\le \beta\gamma^d\alpha^d\left(1+\sqrt{A}\right)\exp\!\left(\beta\left[\max_s\theta(s)-\min_s\theta(s)-\min_s r(s)\right]\right)\left\|z-c_1\mathbf{1}_S\right\|. \qquad (49)
\end{aligned}$$
Lastly, we show that $\|z-c_1\mathbf{1}_S\|$ is bounded independently of $d$. Denote $c=(c_1,\dots,c_S)^\top$ and $\tilde c=(0,c_2,\dots,c_S)^\top$, and let $Z$ be the matrix whose $i$-th column is $z_i$. Then
$$\begin{aligned}
\|z-c_1\mathbf{1}_S\| &= \Big\|\sum_{i=2}^Sc_iz_i\Big\| \qquad (50)\\
&= \|Z\tilde c\| \qquad (51)\\
&\le \|Z\|\,\|\tilde c\| \qquad (52)\\
&\le \|Z\|\,\|c\| \qquad (53)\\
&= \|Z\|\,\|Z^{-1}z\| \qquad (54)\\
&\le \|Z\|\,\|Z^{-1}\|, \qquad (55)
\end{aligned}$$
where the last relation holds because $z$ is a unit vector. All matrix norms here are $\ell_2$-induced norms.

Next, denote by $W$ the matrix whose $i$-th column is $w_i$. Recall that (35) only defined $w_2,\dots,w_S$; we now set $w_1=\exp(\beta\gamma^d\Theta)$. Note that $w_1$ is linearly independent of $\{w_2,\dots,w_S\}$ because of (35) together with the fact that $\mu^\top w_1>0$. The relation between $Z$ and $W$ is $Z=D^{-1}(\exp(\beta\gamma^d\Theta))W$. Substituting this into (55) gives
$$\begin{aligned}
\|z-c_1\mathbf{1}_S\| &\le \left\|D^{-1}(\exp(\beta\gamma^d\Theta))W\right\|\left\|W^{-1}D(\exp(\beta\gamma^d\Theta))\right\| \qquad (56)\\
&\le \|W\|\,\|W^{-1}\|\,\left\|D(\exp(\beta\gamma^d\Theta))\right\|\left\|D^{-1}(\exp(\beta\gamma^d\Theta))\right\|. \qquad (57)
\end{aligned}$$
It further holds that
$$\left\|D(\exp(\beta\gamma^d\Theta))\right\| \le \max_s\exp\!\left(\beta\gamma^d\theta(s)\right) \le \max\{1,\exp[\beta\max_s\theta(s)]\}, \qquad (58)$$
where the last bound equals $1$ if $\theta(s)<0$ for all $s$. Similarly,
$$\left\|D^{-1}(\exp(\beta\gamma^d\Theta))\right\| \le \frac{1}{\min_s\exp\!\left(\beta\gamma^d\theta(s)\right)} \le \frac{1}{\min\{1,\exp[\beta\min_s\theta(s)]\}}. \qquad (59)$$
Furthermore, by the properties of the $\ell_2$-induced norm,
$$\begin{aligned}
\|W\|_2 &\le \sqrt{S}\,\|W\|_1 \qquad (60)\\
&= \sqrt{S}\max_{1\le i\le S}\|w_i\|_1 \qquad (61)\\
&= \sqrt{S}\max\left\{\left\|\exp(\beta\gamma^d\Theta)\right\|_1,\max_{2\le i\le S}\|w_i\|_1\right\} \qquad (62)\\
&\le \sqrt{S}\max\left\{1,\exp[\beta\max_s\theta(s)],\max_{2\le i\le S}\|w_i\|_1\right\}. \qquad (63)
\end{aligned}$$
Lastly,
$$\begin{aligned}
\|W^{-1}\| &= \frac{1}{\sigma_{\min}(W)} \qquad (64)\\
&\le \left(\prod_{i=1}^{S-1}\frac{\sigma_{\max}(W)}{\sigma_i(W)}\right)\frac{1}{\sigma_{\min}(W)} \qquad (65)\\
&= \frac{\left(\sigma_{\max}(W)\right)^{S-1}}{\prod_{i=1}^S\sigma_i(W)} \qquad (66)\\
&= \frac{\|W\|^{S-1}}{|\det(W)|}. \qquad (67)
\end{aligned}$$
The determinant of $W$ is a sum of products of its entries, and only the first column of $W$ depends on $d$. To upper bound (67) independently of $d$, we therefore bound the entries $[W]_{i,1}$ from above and below by constants independent of $d$, depending on their sign:
$$\min\{1,\exp[\beta\min_s\theta(s)]\} \le [W]_{i,1} \le \max\{1,\exp[\beta\max_s\theta(s)]\}. \qquad (68)$$
Using this, together with (55), (57), (58), (59), and (63), we conclude that $\|z-c_1\mathbf{1}_S\|$ is upper bounded by a constant independent of $d$. This concludes the proof.
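As an informal illustration of Theorem 4.7 (not part of the proof), the following NumPy sketch measures how the Frobenius norm of the E-SoftTreeMax gradient of Lemma 4.6 shrinks with the depth $d$ on a toy chain with a constant reward, the case in which the theorem gives $\alpha=|\lambda_2(P^{\pi_b})|$, so the per-depth factor should be roughly $\gamma|\lambda_2(P^{\pi_b})|$. The lazy-chain construction, seed, and all constants are our own assumptions.

import numpy as np
from numpy.linalg import eigvals

rng = np.random.default_rng(3)
S, A, gamma, beta = 10, 3, 0.97, 0.5
# A "lazy" chain keeps |lambda_2| large so the decay rate is easy to read off numerically.
P = 0.9 * np.broadcast_to(np.eye(S), (A, S, S)) + 0.1 * rng.dirichlet(np.ones(S), size=(A, S))
pi_b = rng.dirichlet(np.ones(A), size=S)
P_pib = np.einsum('sa,asz->sz', pi_b, P)
lam2 = np.sort(np.abs(eigvals(P_pib)))[-2]
theta = rng.standard_normal(S)
R = np.full(S, 0.5)            # constant reward: the case alpha = |lambda_2(P^{pi_b})|
s = 0

def grad_norm(d):
    E = P[:, s, :].copy()
    for h in range(1, d):      # E_{s,d} = P_s prod_h D(exp(beta gamma^h R)) P^{pi_b}   (eq. 19)
        E = E @ np.diag(np.exp(beta * gamma**h * R)) @ P_pib
    x = np.exp(beta * gamma**d * theta)
    pi = E @ x / (E @ x).sum()
    G = beta * gamma**d * (np.eye(A) - np.outer(np.ones(A), pi)) @ np.diag(1.0 / pi) \
        @ (E @ np.diag(x)) / (E @ x).sum()   # Lemma 4.6
    return np.linalg.norm(G)

norms = {d: grad_norm(d) for d in (2, 4, 8, 16)}
for d, n in norms.items():
    print(f"d={d:2d}  ||grad||_F = {n:.3e}")
# The two numbers below should be of similar magnitude; Theorem 4.7 gives an upper bound, not an identity.
print("empirical per-depth factor:", (norms[16] / norms[8]) ** (1 / 8))
print("gamma * |lambda_2|        :", gamma * lam2)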