[
  {
    "current_steps": 127,
    "loss": 1.7356,
    "learning_rate": 0.0,
    "epoch": 0.01
  },
  {
    "current_steps": 255,
    "loss": 1.7333,
    "learning_rate": 0.0003,
    "epoch": 0.01
  },
  {
    "current_steps": 383,
    "loss": 1.7095,
    "learning_rate": 0.00029932735426008964,
    "epoch": 0.02
  },
  {
    "current_steps": 511,
    "loss": 1.4713,
    "learning_rate": 0.00029865470852017935,
    "epoch": 0.03
  },
  {
    "current_steps": 639,
    "loss": 1.3242,
    "learning_rate": 0.000297982062780269,
    "epoch": 0.03
  },
  {
    "current_steps": 767,
    "loss": 1.3469,
    "learning_rate": 0.00029730941704035873,
    "epoch": 0.04
  },
  {
    "current_steps": 895,
    "loss": 1.2645,
    "learning_rate": 0.0002966367713004484,
    "epoch": 0.05
  },
  {
    "current_steps": 1023,
    "loss": 1.2507,
    "learning_rate": 0.00029596412556053806,
    "epoch": 0.05
  },
  {
    "current_steps": 1151,
    "loss": 1.2348,
    "learning_rate": 0.00029529147982062777,
    "epoch": 0.06
  },
  {
    "current_steps": 1279,
    "loss": 1.3604,
    "learning_rate": 0.00029461883408071743,
    "epoch": 0.07
  },
  {
    "current_steps": 1407,
    "loss": 1.2057,
    "learning_rate": 0.00029394618834080715,
    "epoch": 0.07
  },
  {
    "current_steps": 1535,
    "loss": 1.2899,
    "learning_rate": 0.00029327354260089687,
    "epoch": 0.08
  },
  {
    "current_steps": 1663,
    "loss": 1.2692,
    "learning_rate": 0.00029260089686098653,
    "epoch": 0.09
  },
  {
    "current_steps": 1791,
    "loss": 1.169,
    "learning_rate": 0.0002919282511210762,
    "epoch": 0.09
  },
  {
    "current_steps": 1919,
    "loss": 1.1576,
    "learning_rate": 0.0002912556053811659,
    "epoch": 0.1
  },
  {
    "current_steps": 2047,
    "loss": 1.1784,
    "learning_rate": 0.00029058295964125557,
    "epoch": 0.11
  },
  {
    "current_steps": 2175,
    "loss": 1.1418,
    "learning_rate": 0.0002899103139013453,
    "epoch": 0.11
  },
  {
    "current_steps": 2303,
    "loss": 1.2816,
    "learning_rate": 0.00028923766816143495,
    "epoch": 0.12
  },
  {
    "current_steps": 2431,
    "loss": 1.2302,
    "learning_rate": 0.00028856502242152467,
    "epoch": 0.13
  },
  {
    "current_steps": 2559,
    "loss": 1.1729,
    "learning_rate": 0.00028789237668161433,
    "epoch": 0.13
  },
  {
    "current_steps": 2687,
    "loss": 1.1625,
    "learning_rate": 0.000287219730941704,
    "epoch": 0.14
  },
  {
    "current_steps": 2815,
    "loss": 1.1987,
    "learning_rate": 0.0002865470852017937,
    "epoch": 0.15
  },
  {
    "current_steps": 2943,
    "loss": 1.1673,
    "learning_rate": 0.00028587443946188337,
    "epoch": 0.15
  },
  {
    "current_steps": 3071,
    "loss": 1.2074,
    "learning_rate": 0.0002852017937219731,
    "epoch": 0.16
  },
  {
    "current_steps": 3199,
    "loss": 1.1848,
    "learning_rate": 0.00028452914798206275,
    "epoch": 0.17
  },
  {
    "current_steps": 3327,
    "loss": 1.1209,
    "learning_rate": 0.0002838565022421524,
    "epoch": 0.17
  },
  {
    "current_steps": 3455,
    "loss": 1.091,
    "learning_rate": 0.0002831838565022421,
    "epoch": 0.18
  },
  {
    "current_steps": 3583,
    "loss": 1.1893,
    "learning_rate": 0.00028251121076233184,
    "epoch": 0.19
  },
  {
    "current_steps": 3711,
    "loss": 1.128,
    "learning_rate": 0.0002818385650224215,
    "epoch": 0.19
  },
  {
    "current_steps": 3839,
    "loss": 1.0913,
    "learning_rate": 0.0002811659192825112,
    "epoch": 0.2
  },
  {
    "current_steps": 3967,
    "loss": 1.1659,
    "learning_rate": 0.0002804932735426009,
    "epoch": 0.21
  },
  {
    "current_steps": 4095,
    "loss": 1.1555,
    "learning_rate": 0.00027982062780269054,
    "epoch": 0.21
  },
  {
    "current_steps": 4223,
    "loss": 1.1274,
    "learning_rate": 0.00027914798206278026,
    "epoch": 0.22
  },
  {
    "current_steps": 4351,
    "loss": 1.1193,
    "learning_rate": 0.0002784753363228699,
    "epoch": 0.23
  },
  {
    "current_steps": 4479,
    "loss": 1.1114,
    "learning_rate": 0.00027780269058295964,
    "epoch": 0.23
  },
  {
    "current_steps": 4607,
    "loss": 1.1123,
    "learning_rate": 0.0002771300448430493,
    "epoch": 0.24
  },
  {
    "current_steps": 4735,
    "loss": 1.135,
    "learning_rate": 0.00027645739910313896,
    "epoch": 0.25
  },
  {
    "current_steps": 4863,
    "loss": 1.1067,
    "learning_rate": 0.0002757847533632287,
    "epoch": 0.25
  },
  {
    "current_steps": 4991,
    "loss": 1.0959,
    "learning_rate": 0.00027511210762331834,
    "epoch": 0.26
  },
  {
    "current_steps": 5119,
    "loss": 1.0699,
    "learning_rate": 0.00027443946188340806,
    "epoch": 0.27
  },
  {
    "current_steps": 5247,
    "loss": 1.1366,
    "learning_rate": 0.0002737668161434977,
    "epoch": 0.27
  },
  {
    "current_steps": 5375,
    "loss": 1.1146,
    "learning_rate": 0.0002730941704035874,
    "epoch": 0.28
  },
  {
    "current_steps": 5503,
    "loss": 1.1146,
    "learning_rate": 0.0002730941704035874,
    "epoch": 0.28
  }
]