{"id":1968,"date":"2018-01-26T20:58:13","date_gmt":"2018-01-26T11:58:13","guid":{"rendered":"http:\/\/now0930.tk\/wordpress\/?p=1968"},"modified":"2018-01-26T20:58:13","modified_gmt":"2018-01-26T11:58:13","slug":"monte-carlo-method-%ec%bd%94%eb%93%9c-%eb%b6%84%ec%84%9d","status":"publish","type":"post","link":"https:\/\/now0930.pe.kr\/wordpress\/monte-carlo-method-%ec%bd%94%eb%93%9c-%eb%b6%84%ec%84%9d\/","title":{"rendered":"Monte Carlo Method \ucf54\ub4dc \ubd84\uc11d"},"content":{"rendered":"<p>\uc5ec\uae30\uc5d0 \uc788\ub294 <a href=\"https:\/\/github.com\/rlcode\/reinforcement-learning-kr\/tree\/master\/1-grid-world\/3-monte-carlo\">\ucf54\ub4dc<\/a> \ucc38\uc870..<br \/>\n\ucc45\uc740 &#8220;\ud30c\uc774\uc36c\uacfc \ucf00\ub77c\uc2a4\ub85c \ubc30\uc6b0\ub294 \uac15\ud654\ud559\uc2b5&#8221; \ub0b4\uc6a9.<\/p>\n<p>Monte Carlo Method\uc758 \ucf54\ub4dc\uac00 \uc124\uba85\ub418\uc5b4 \uc788\uc9c0 \uc54a\uc544, python \ub514\ubc84\uadf8\ub85c \uadf8 \uacb0\uacfc\ub97c \uc7a0\uae50 \uc0b4\ud3c8\ub2e4.<\/p>\n<p>\uba54\uc778 \ucf54\ub4dc.<\/p>\n<pre class=\"lang:python decode:true\" title=\"\uba54\uc778 \ud568\uc218\"># \uba54\uc778 \ud568\uc218\nif __name__ == \"__main__\":\n    env = Env()\n    agent = MCAgent(actions=list(range(env.n_actions)))\n\n    for episode in range(1000):\n        state = env.reset()\n        action = agent.get_action(state)\n\n        while True:\n            env.render()\n\n            # \ub2e4\uc74c \uc0c1\ud0dc\ub85c \uc774\ub3d9\n            # \ubcf4\uc0c1\uc740 \uc22b\uc790\uc774\uace0, \uc644\ub8cc \uc5ec\ubd80\ub294 boolean\n            next_state, reward, done = env.step(action)\n            agent.save_sample(next_state, reward, done)\n            ##\ud14c\uc2a4\ud2b8\uc6a9.\n            print(agent.samples)\n            print(\"\\n\")\n\n            # \ub2e4\uc74c \ud589\ub3d9 \ubc1b\uc544\uc634\n            action = agent.get_action(next_state)\n\n            # \uc5d0\ud53c\uc18c\ub4dc\uac00 \uc644\ub8cc\ub410\uc744 \ub54c, 
\ud050 \ud568\uc218 \uc5c5\ub370\uc774\ud2b8\n            if done:\n                print(\"episode : \", episode)\n                agent.update()\n                agent.samples.clear()\n                break\n<\/pre>\n<p>6\ubc88\ud589: episode\ub97c 1,000\ubc88 \ubc18\ubcf5.<br \/>\n10\ubc88\ud589: \ubb34\ud55c\ubc88 \ubc18\ubcf5\uc774\ub098, 25\ubc88 \uacb0\uacfc\ub97c \ubcf4\uace0 break \uacb0\uc815. done\uc740 15\ubc88 \ud589\uc5d0\uc11c \uc5c5\ub370\uc774\ud2b8.<\/p>\n<pre class=\"lang:sh decode:true\" title=\"\ub514\ubc84\uadf81\">(Pdb) l\n109                 # \ubcf4\uc0c1\uc740 \uc22b\uc790\uc774\uace0, \uc644\ub8cc \uc5ec\ubd80\ub294 boolean\n110                 next_state, reward, done = env.step(action)\n111                 agent.save_sample(next_state, reward, done)\n112                 ##\ud14c\uc2a4\ud2b8\uc6a9.\n113                 print(agent.samples)\n114  -&gt;              print(\"\\n\")\n115     \n116                 # \ub2e4\uc74c \ud589\ub3d9 \ubc1b\uc544\uc634\n117                 action = agent.get_action(next_state)\n118\n119 # \uc5d0\ud53c\uc18c\ub4dc\uac00 \uc644\ub8cc\ub410\uc744 \ub54c, \ud050 \ud568\uc218 \uc5c5\ub370\uc774\ud2b8\n(Pdb) l\n120 if done:\n121 print(\"episode : \", episode)\n122 agent.update()\n123 agent.samples.clear()\n124 break\n[EOF]\n(Pdb) p agent.samples\n[[[0, 0], 0, False], [[0, 1], 0, False], [[1, 1], 0, False], [[1, 2], -100, True]]\n(Pdb)\n\n<\/pre>\n<p>\uc5d0\ud53c\uc18c\ub4dc\uac00 \ub05d\ub098\uba74, \uc790\uc2e0\uc774 \ubc29\ubb38\ud55c \ubaa8\ub4e0 \uc140\uc744 list\ub85c \uae30\uc5b5.<br \/>\n\uc544\ub798 \uc88c\ud45c \uc911, \ud589\uacfc \uc5f4\uc774 \ubc14\ub01c.<br \/>\n<img loading=\"lazy\" decoding=\"async\" class=\"alignleft size-full wp-image-1970\" src=\"http:\/\/now0930.tk\/wordpress\/wp-content\/uploads\/2018\/01\/GirdMonteCarlo.png\" alt=\"\" width=\"505\" height=\"526\" srcset=\"https:\/\/now0930.pe.kr\/wordpress\/wp-content\/uploads\/2018\/01\/GirdMonteCarlo.png 505w, 
https:\/\/now0930.pe.kr\/wordpress\/wp-content\/uploads\/2018\/01\/GirdMonteCarlo-288x300.png 288w\" sizes=\"auto, (max-width: 505px) 100vw, 505px\" \/><br \/>\n22\ubc88\ud589: \ud604\uc7ac \uc2a4\ud14c\uc774\ud2b8\uc5d0\uc11c \ub2e4\uc74c \uc2a4\ud14c\uc774\ud2b8\ub97c \uc608\uce21.<\/p>\n<pre class=\"lang:sh decode:true\">(Pdb) l\n 27             for reward in reversed(self.samples):\n 28                 state = str(reward[0])\n 29                 if state not in visit_state:\n 30                     visit_state.append(state)\n 31                     G_t = self.discount_factor * (reward[1] + G_t)\n 32  -&gt;                  value = self.value_table[state]\n 33                     ##\ud14c\uc2a4\ud2b8..\n 34                     print(\"Value is \",value)\n 35                     self.value_table[state] = (value +\n 36                                                self.learning_rate * (G_t - value))\n 37     \n(Pdb) \n 38                     print(\"Value state is\", state)\n 39                     #print(\"type of value_table is\", type(self.value_table))\n 40                     print(\"Value table is\", self.value_table)\n 41                     #print(\"Value table is\",self.value_table[state])\n 42     \n 43                     print(\"\\n\")\n 44     \n 45         # \ud050 \ud568\uc218\uc5d0 \ub530\ub77c\uc11c \ud589\ub3d9\uc744 \ubc18\ud658\n 46         # \uc785\uc2e4\ub860 \ud0d0\uc695 \uc815\ucc45\uc5d0 \ub530\ub77c\uc11c \ud589\ub3d9\uc744 \ubc18\ud658\n 47         def get_action(self, state):\n 48             if np.random.rand() &lt; self.epsilon:\n(Pdb) n\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(34)update()\n-&gt; print(\"Value is \",value)\n(Pdb) n\nValue is  0.0\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(35)update()\n-&gt; self.value_table[state] = (value +\n(Pdb) n\n> 
\/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(36)update()\n-&gt; self.learning_rate * (G_t - value))\n(Pdb) \n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(38)update()\n-&gt; print(\"Value state is\", state)\n(Pdb) \nValue state is [1, 2]\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(40)update()\n-&gt; print(\"Value table is\", self.value_table)\n(Pdb) \nValue table is defaultdict(&lt;class 'float'&gt;, {'[0, 0]': 0.0, '[0, 1]': 0.0, '[1, 0]': 0.0, '[1, 2]': -0.9, '[2, 1]': 0.0, '[1, 1]': 0.0, '[1, 3]': 0.0, '[0, 2]': 0.0, '[2, 2]': 0.0})\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(43)update()\n-&gt; print(\"\\n\")\n(Pdb) p self.value_table\ndefaultdict(&lt;class 'float'&gt;, {'[0, 0]': 0.0, '[0, 1]': 0.0, '[1, 0]': 0.0, '[1, 2]': -0.9, '[2, 1]': 0.0, '[1, 1]': 0.0, '[1, 3]': 0.0, '[0, 2]': 0.0, '[2, 2]': 0.0})\n(Pdb) \n\n<\/pre>\n<p>self.value_table\uc744 \uac01 state \uad00\ub9ac. \ubc29\ubb38\ud55c state\ub97c list\ub85c \ucd94\uac00\ud558\uace0, \uac01 value\ub97c update. 
\ub098\uc911\uc5d0 \ud604\uc7ac state\ub97c \ubcf4\uace0 action\uc744 \uc0b0\ucd9c \ud560 \ub54c \ud544\uc694.<\/p>\n<p>update \ud558\uba74 terminal state\uc758 reward\ub97c \uac10\uac00\uc728\ub85c \uacc4\uc18d \uac10\uc18c\uc2dc\ucf1c \uc2dc\uc791 \uc2a4\ud14c\uc774\ud2b8\uae4c\uc9c0 value_table[state]\ub85c \uc5c5\ub370\uc774\ud2b8..<\/p>\n<p>\uc544\ub798 \ucf54\ub4dc\uc5d0\uc11c<\/p>\n<p>if state not in visit_state\uac00 \uc788\uae30 \ub54c\ubb38\uc5d0, \ucc98\uc74c \ubc29\ubb38\ud55c state\ub9cc \uad00\uc2ec\uc744 \uac16\uc74c.<\/p>\n<pre class=\"lang:sh decode:true\"> 19        # \uba54\ubaa8\ub9ac\uc5d0 \uc0d8\ud50c\uc744 \ucd94\uac00\n 20         def save_sample(self, state, reward, done):\n 21             self.samples.append([state, reward, done])\n 22     \n 23         # \ubaa8\ub4e0 \uc5d0\ud53c\uc18c\ub4dc\uc5d0\uc11c \uc5d0\uc774\uc804\ud2b8\uac00 \ubc29\ubb38\ud55c \uc0c1\ud0dc\uc758 \ud050 \ud568\uc218\ub97c \uc5c5\ub370\uc774\ud2b8\n 24  -&gt;      def update(self):\n 25             G_t = 0\n 26             visit_state = []\n 27             for reward in reversed(self.samples):\n 28                 state = str(reward[0])\n 29                 if state not in visit_state:\n(Pdb) n\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(25)update()\n-&gt; G_t = 0\n(Pdb) p self.samples\n[[[1, 0], 0, False], [[1, 0], 0, False], [[2, 0], 0, False], [[3, 0], 0, False], [[3, 1], 0, False], [[3, 2], 0, False], [[2, 2], 100, True]]\n(Pdb) \n...\n(Pdb) l\n 24         def update(self):\n 25             G_t = 0\n 26             visit_state = []\n 27             for reward in reversed(self.samples):\n 28                 state = str(reward[0])\n 29  -&gt;              if state not in visit_state:\n 30                     visit_state.append(state)\n 31                     G_t = self.discount_factor * (reward[1] + G_t)\n 32                     value = self.value_table[state]\n 33                 
    ##\ud14c\uc2a4\ud2b8..\n 34                     print(\"Value is \",value)\n(Pdb) n\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(30)update()\n-&gt; visit_state.append(state)\n(Pdb) \n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(31)update()\n-&gt; G_t = self.discount_factor * (reward[1] + G_t)\n(Pdb) \n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(32)update()\n-&gt; value = self.value_table[state]\n(Pdb) p self.value_table\ndefaultdict(&lt;class 'float'&gt;, {'[0, 0]': 10.523899590468769, '[0, 1]': 2.3971909843905967, '[1, 0]': 45.81574921045315, '[1, 2]': -2.67309, '[2, 1]': -19.99607765407679, '[1, 1]': 6.356969207767222, '[1, 3]': 0.0, '[0, 2]': -0.41447951099999986, '[2, 2]': 89.22430756838313, '[2, 0]': 50.92319972091863, '[3, 0]': 60.0913655106793, '[3, 1]': 66.88710703917133, '[0, 3]': -0.7290000000000001, '[0, 4]': -0.6561000000000001, '[1, 4]': 0.0, '[4, 0]': 8.302503491918047, '[4, 1]': 6.159613912118276, '[4, 2]': 6.304592658442578, '[3, 2]': 80.25132103821517, '[3, 3]': 6.970546731858152, '[2, 3]': 0.0, '[3, 4]': 0.6561000000000001, '[4, 3]': 0.0, '[2, 4]': 0.0, '[4, 4]': 0.0})\n\n<\/pre>\n<p>500\ud68c \uc2dc\ud589 \ud6c4, \uacfc\uac70 \uc774\ub825\uc744 \ubcf4\uba74 \uc544\ub798\uc640 \uac19\ub2e4.<\/p>\n<pre class=\"lang:sh decode:true\">(Pdb) p agent.samples\n[[[0, 1], 0, False], [[0, 0], 0, False], [[0, 1], 0, False], [[0, 0], 0, False], [[1, 0], 0, False], [[1, 1], 0, False], [[1, 0], 0, False], [[2, 0], 0, False], [[3, 0], 0, False], [[3, 1], 0, False], [[3, 2], 0, False], [[2, 2], 100, True]]\n(Pdb) l\n116                 # \ub2e4\uc74c \ud589\ub3d9 \ubc1b\uc544\uc634\n117                 action = agent.get_action(next_state)\n118     \n119                 # \uc5d0\ud53c\uc18c\ub4dc\uac00 
\uc644\ub8cc\ub410\uc744 \ub54c, \ud050 \ud568\uc218 \uc5c5\ub370\uc774\ud2b8\n120                 if done:\n121 B-&gt;                  print(\"episode : \", episode)\n122                     agent.update()\n123                     agent.samples.clear()\n124                     break\n[EOF]\n(Pdb) n\nepisode :  500\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(122)&lt;module&gt;()\n-&gt; agent.update()\n(Pdb) l\n117                 action = agent.get_action(next_state)\n118     \n119                 # \uc5d0\ud53c\uc18c\ub4dc\uac00 \uc644\ub8cc\ub410\uc744 \ub54c, \ud050 \ud568\uc218 \uc5c5\ub370\uc774\ud2b8\n120                 if done:\n121 B                   print(\"episode : \", episode)\n122  -&gt;                  agent.update()\n123                     agent.samples.clear()\n124                     break\n[EOF]\n(Pdb) s\n--Call--\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(24)update()\n-&gt; def update(self):\n(Pdb) l\n 19         # \uba54\ubaa8\ub9ac\uc5d0 \uc0d8\ud50c\uc744 \ucd94\uac00\n 20         def save_sample(self, state, reward, done):\n 21             self.samples.append([state, reward, done])\n 22     \n 23         # \ubaa8\ub4e0 \uc5d0\ud53c\uc18c\ub4dc\uc5d0\uc11c \uc5d0\uc774\uc804\ud2b8\uac00 \ubc29\ubb38\ud55c \uc0c1\ud0dc\uc758 \ud050 \ud568\uc218\ub97c \uc5c5\ub370\uc774\ud2b8\n 24  -&gt;      def update(self):\n 25             G_t = 0\n 26             visit_state = []\n 27             for reward in reversed(self.samples):\n 28                 state = str(reward[0])\n 29                 if state not in visit_state:\n(Pdb) n\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(25)update()\n-&gt; G_t = 0\n(Pdb) \n> 
\/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(26)update()\n-&gt; visit_state = []\n(Pdb) \n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(27)update()\n-&gt; for reward in reversed(self.samples):\n(Pdb) p visit_state\n[]\n(Pdb) l\n 22     \n 23         # \ubaa8\ub4e0 \uc5d0\ud53c\uc18c\ub4dc\uc5d0\uc11c \uc5d0\uc774\uc804\ud2b8\uac00 \ubc29\ubb38\ud55c \uc0c1\ud0dc\uc758 \ud050 \ud568\uc218\ub97c \uc5c5\ub370\uc774\ud2b8\n 24         def update(self):\n 25             G_t = 0\n 26             visit_state = []\n 27  -&gt;          for reward in reversed(self.samples):\n 28                 state = str(reward[0])\n 29                 if state not in visit_state:\n 30                     visit_state.append(state)\n 31                     G_t = self.discount_factor * (reward[1] + G_t)\n 32                     value = self.value_table[state]\n(Pdb) p self.samples\n[[[0, 1], 0, False], [[0, 0], 0, False], [[0, 1], 0, False], [[0, 0], 0, False], [[1, 0], 0, False], [[1, 1], 0, False], [[1, 0], 0, False], [[2, 0], 0, False], [[3, 0], 0, False], [[3, 1], 0, False], [[3, 2], 0, False], [[2, 2], 100, True]]\n(Pdb) n\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(28)update()\n-&gt; state = str(reward[0])\n(Pdb) p state\n[0, 0]\n(Pdb) n\n> \/home\/now0930\/tensorflow\/reinforceLearing\/reinforcement-learning-kr-master\/1-grid-world\/3-monte-carlo\/mc_agentTest.py(29)update()\n-&gt; if state not in visit_state:\n(Pdb) p state\n'[2, 2]'\n(Pdb) p self.value_table\ndefaultdict(&lt;class 'float'&gt;, {'[0, 0]': 12.843038729477103, '[0, 1]': 3.6010347623049843, '[1, 0]': 45.739579013968516, '[0, 2]': 0.38742048900000015, '[1, 1]': 4.629255057991681, '[2, 0]': 50.83890751506551, '[2, 2]': 89.2397438477723, '[3, 1]': 66.85889829029591, '[2, 1]': 
-19.99607765407679, '[3, 0]': 60.06357418599044, '[4, 0]': 8.024660160565901, '[3, 2]': 80.26033687696258, '[4, 1]': 5.96080478456966, '[3, 3]': 6.970546731858152, '[4, 2]': 6.029123110357315, '[3, 4]': 1.3056390000000002, '[2, 4]': 0.47829690000000014, '[4, 4]': 0.5904900000000002, '[2, 3]': 0.81, '[4, 3]': 0.5314410000000002, '[1, 3]': 0.0, '[1, 4]': 0.0, '[1, 2]': 0.0, '[0, 3]': 0.0})\n<\/pre>\n<p><img loading=\"lazy\" decoding=\"async\" class=\"alignleft size-large wp-image-2002\" src=\"http:\/\/now0930.tk\/wordpress\/wp-content\/uploads\/2018\/01\/20180126_202517-1024x984.jpg\" alt=\"\" width=\"474\" height=\"455\" srcset=\"https:\/\/now0930.pe.kr\/wordpress\/wp-content\/uploads\/2018\/01\/20180126_202517-1024x984.jpg 1024w, https:\/\/now0930.pe.kr\/wordpress\/wp-content\/uploads\/2018\/01\/20180126_202517-300x288.jpg 300w, https:\/\/now0930.pe.kr\/wordpress\/wp-content\/uploads\/2018\/01\/20180126_202517-768x738.jpg 768w\" sizes=\"auto, (max-width: 474px) 100vw, 474px\" \/><\/p>\n","protected":false},"excerpt":{"rendered":"<p>\uc5ec\uae30\uc5d0 \uc788\ub294 \ucf54\ub4dc \ucc38\uc870.. \ucc45\uc740 &#8220;\ud30c\uc774\uc36c\uacfc \ucf00\ub77c\uc2a4\ub85c \ubc30\uc6b0\ub294 \uac15\ud654\ud559\uc2b5&#8221; \ub0b4\uc6a9. 
Monte Carlo Method\uc758 \ucf54\ub4dc\uac00 \uc124\uba85\ub418\uc5b4 \uc788\uc9c0 \uc54a\uc544, python \ub514\ubc84\uadf8\ub85c \uadf8 [&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"site-sidebar-layout":"default","site-content-layout":"","ast-site-content-layout":"default","site-content-style":"default","site-sidebar-style":"default","ast-global-header-display":"","ast-banner-title-visibility":"","ast-main-header-display":"","ast-hfb-above-header-display":"","ast-hfb-below-header-display":"","ast-hfb-mobile-header-display":"","site-post-title":"","ast-breadcrumbs-content":"","ast-featured-img":"","footer-sml-layout":"","ast-disable-related-posts":"","theme-transparent-header-meta":"","adv-header-id-meta":"","stick-header-meta":"","header-above-stick-meta":"","header-main-stick-meta":"","header-below-stick-meta":"","astra-migrate-meta-layouts":"default","ast-page-background-enabled":"default","ast-page-background-meta":{"desktop":{"background-color":"var(--ast-global-color-5)","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""},"tablet":{"background-color":"","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""},"mobile":{"background-color":"","background-image":"","background-repeat":"repeat","background-position":"center 
center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""}},"ast-content-background-meta":{"desktop":{"background-color":"var(--ast-global-color-4)","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""},"tablet":{"background-color":"var(--ast-global-color-4)","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""},"mobile":{"background-color":"var(--ast-global-color-4)","background-image":"","background-repeat":"repeat","background-position":"center 
center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""}},"_jetpack_newsletter_access":"","_jetpack_dont_email_post_to_subs":false,"_jetpack_newsletter_tier_id":0,"_jetpack_memberships_contains_paywalled_content":false,"_jetpack_memberships_contains_paid_content":false,"footnotes":"","jetpack_post_was_ever_published":false},"categories":[33],"tags":[270,110,271],"class_list":["post-1968","post","type-post","status-publish","format-standard","hentry","category-tensorflow","tag-montecarlo","tag-python","tag-reinforcementlearning"],"jetpack_featured_media_url":"","jetpack_sharing_enabled":true,"_links":{"self":[{"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/posts\/1968","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/comments?post=1968"}],"version-history":[{"count":6,"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/posts\/1968\/revisions"}],"predecessor-version":[{"id":2005,"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/posts\/1968\/revisions\/2005"}],"wp:attachment":[{"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/media?parent=1968"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/categories?post=1968"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/now0930.pe.kr\/wordpress\/wp-json\/wp\/v2\/tags?post=1968"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}