Commit a1e0bd69 by Ruizhi Chen

Update theory.tex

parent c0b7377e
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
\STATE $P(\hat{t}|s) = \pi^L_{old}(\hat{t}|s)$ \STATE $P(\hat{t}|s) = \pi^L_{old}(\hat{t}|s)$
\STATE Sample $\hat{t}$ with $P(\hat{t}|s)$ \STATE Sample $\hat{t}$ with $P(\hat{t}|s)$
\STATE Get reward $r(\hat{t},t)$ \STATE Get reward $r(\hat{t},t)$
\STATE $J(\theta^S,\theta^L)=E_{\pi_{old}^S,\pi^L}[r(\hat{t},t)\cdot\frac{\pi^L(\hat{t}|s)}{\pi^L_{old}(\hat{t}|s)}]$ \STATE $J(\theta^S,\theta^L)=E_{\pi^S,\pi_{old}^L}[r(\hat{t},t)\cdot\frac{\pi^L(\hat{t}|s)}{\pi^L_{old}(\hat{t}|s)}]$
\STATE Update $\theta^L$ by $\nabla_{\theta^L}J$ \STATE Update $\theta^L$ by $\nabla_{\theta^L}J$
\ENDFOR \ENDFOR
\STATE $\pi_{old}^L\leftarrow \pi^L$ \STATE $\pi_{old}^L\leftarrow \pi^L$
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment