Scheduled epochs: 150 (epochs + cooldown_epochs). Warmup within epochs when warmup_prefix=False. LR stepped per epoch.
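The line above is timm's scheduler summary. As a minimal sketch (not the actual training script), the per-epoch LR trace below -- 1.000e-05 at epoch 0, a linear ramp toward ~0.4 over 5 epochs, then cosine decay -- is consistent with timm's CosineLRScheduler configured roughly as follows. The model, optimizer, and exact hyperparameter values are assumptions inferred from the logged LRs, not shown in the log itself:

    import torch
    from timm.scheduler import CosineLRScheduler

    # Placeholder model/optimizer; the log does not show the real ones.
    model = torch.nn.Linear(10, 10)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.4)  # peak LR assumed from the ~0.4 ramp

    # warmup_prefix=False makes the warmup epochs count toward t_initial
    # ("Warmup within epochs" above). t_initial=150 matches the 150
    # scheduled epochs and reproduces e.g. LR 3.989e-01 at epoch 5:
    # 0.5 * 0.4 * (1 + cos(pi * 5 / 150)) ~= 0.3989.
    scheduler = CosineLRScheduler(
        optimizer,
        t_initial=150,        # assumed: epochs + cooldown_epochs
        warmup_t=5,           # assumed from the linear ramp over epochs 0-4
        warmup_lr_init=1e-5,  # matches LR 1.000e-05 at epoch 0
        warmup_prefix=False,
    )

    for epoch in range(150):
        # ... one training epoch ...
        scheduler.step(epoch + 1)  # LR stepped once per epoch, as logged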
Train: 0 [   0/156 (  1%)]  Loss: 6.96 (6.96)  Time: 4.444s,  230.40/s  (4.444s,  230.40/s)  LR: 1.000e-05  Data: 1.824 (1.824)
Train: 0 [  50/156 ( 33%)]  Loss: 6.94 (6.94)  Time: 0.396s, 2588.32/s  (0.472s, 2171.74/s)  LR: 1.000e-05  Data: 0.027 (0.062)
Train: 0 [ 100/156 ( 65%)]  Loss: 6.94 (6.94)  Time: 0.400s, 2562.40/s  (0.434s, 2358.67/s)  LR: 1.000e-05  Data: 0.030 (0.045)
Train: 0 [ 150/156 ( 97%)]  Loss: 6.93 (6.94)  Time: 0.400s, 2559.79/s  (0.422s, 2424.30/s)  LR: 1.000e-05  Data: 0.026 (0.039)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.687 (1.687)  Loss:   6.946 ( 6.946)  Acc@1:   0.098 (  0.098)  Acc@5:   0.586 (  0.586)
Test: [  48/48]  Time: 0.701 (0.352)  Loss:   6.940 ( 6.939)  Acc@1:   0.118 (  0.076)  Acc@5:   0.354 (  0.490)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

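Each eval pass ends with a "Test: [ 48/48]" line whose parenthesized Acc@1 is the running average that the checkpoint list records (compare 0.076 above with the 0.07599... stored next to checkpoint-0). A rough, hypothetical parsing sketch -- not part of timm -- for pulling that per-epoch top-1 series out of a saved copy of this log; "log.txt" is a placeholder path:

    import re

    # Matches the Test lines of this log and captures batch index, batch
    # count, and the averaged Acc@1 inside the parentheses.
    TEST_RE = re.compile(r"Test: \[\s*(\d+)/(\d+)\].*Acc@1:\s*[\d.]+ \(\s*([\d.]+)\)")

    def final_top1_per_epoch(path="log.txt"):
        accs = []
        with open(path) as f:
            for line in f:
                m = TEST_RE.search(line)
                if m and m.group(1) == m.group(2):  # last Test line of the pass
                    accs.append(float(m.group(3)))
        return accs

    # e.g. final_top1_per_epoch() -> [0.076, 0.144, 0.442, ...] for this log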
Train: 1 [   0/156 (  1%)]  Loss: 6.95 (6.95)  Time: 1.540s,  664.93/s  (1.540s,  664.93/s)  LR: 8.001e-02  Data: 1.031 (1.031)
Train: 1 [  50/156 ( 33%)]  Loss: 6.93 (6.93)  Time: 0.408s, 2508.83/s  (0.431s, 2378.13/s)  LR: 8.001e-02  Data: 0.027 (0.046)
Train: 1 [ 100/156 ( 65%)]  Loss: 6.92 (6.92)  Time: 0.409s, 2501.28/s  (0.421s, 2434.58/s)  LR: 8.001e-02  Data: 0.027 (0.037)
Train: 1 [ 150/156 ( 97%)]  Loss: 6.92 (6.92)  Time: 0.406s, 2522.21/s  (0.417s, 2456.31/s)  LR: 8.001e-02  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.442 (1.442)  Loss:   6.898 ( 6.898)  Acc@1:   0.000 (  0.000)  Acc@5:   0.391 (  0.391)
Test: [  48/48]  Time: 0.092 (0.331)  Loss:   6.885 ( 6.899)  Acc@1:   0.590 (  0.144)  Acc@5:   1.061 (  0.690)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

Train: 2 [   0/156 (  1%)]  Loss: 6.91 (6.91)  Time: 1.701s,  601.97/s  (1.701s,  601.97/s)  LR: 1.600e-01  Data: 1.327 (1.327)
Train: 2 [  50/156 ( 33%)]  Loss: 6.93 (6.92)  Time: 0.410s, 2494.85/s  (0.435s, 2353.39/s)  LR: 1.600e-01  Data: 0.027 (0.052)
Train: 2 [ 100/156 ( 65%)]  Loss: 6.93 (6.92)  Time: 0.414s, 2474.25/s  (0.422s, 2423.82/s)  LR: 1.600e-01  Data: 0.028 (0.040)
Train: 2 [ 150/156 ( 97%)]  Loss: 6.92 (6.92)  Time: 0.410s, 2497.37/s  (0.418s, 2450.06/s)  LR: 1.600e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.441 (1.441)  Loss:   6.862 ( 6.862)  Acc@1:   0.195 (  0.195)  Acc@5:   1.465 (  1.465)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   6.861 ( 6.863)  Acc@1:   0.472 (  0.442)  Acc@5:   2.005 (  1.758)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

Train: 3 [   0/156 (  1%)]  Loss: 6.90 (6.90)  Time: 1.894s,  540.79/s  (1.894s,  540.79/s)  LR: 2.400e-01  Data: 1.519 (1.519)
Train: 3 [  50/156 ( 33%)]  Loss: 6.92 (6.91)  Time: 0.408s, 2509.12/s  (0.437s, 2344.41/s)  LR: 2.400e-01  Data: 0.027 (0.056)
Train: 3 [ 100/156 ( 65%)]  Loss: 6.91 (6.91)  Time: 0.406s, 2523.46/s  (0.423s, 2421.28/s)  LR: 2.400e-01  Data: 0.027 (0.042)
Train: 3 [ 150/156 ( 97%)]  Loss: 6.91 (6.91)  Time: 0.401s, 2550.95/s  (0.417s, 2454.26/s)  LR: 2.400e-01  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.495 (1.495)  Loss:   6.841 ( 6.841)  Acc@1:   0.684 (  0.684)  Acc@5:   1.953 (  1.953)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.826 ( 6.835)  Acc@1:   0.354 (  0.604)  Acc@5:   2.712 (  2.266)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

Train: 4 [   0/156 (  1%)]  Loss: 6.91 (6.91)  Time: 1.645s,  622.46/s  (1.645s,  622.46/s)  LR: 3.200e-01  Data: 1.275 (1.275)
Train: 4 [  50/156 ( 33%)]  Loss: 6.89 (6.90)  Time: 0.408s, 2508.65/s  (0.429s, 2384.99/s)  LR: 3.200e-01  Data: 0.026 (0.051)
Train: 4 [ 100/156 ( 65%)]  Loss: 6.90 (6.90)  Time: 0.409s, 2503.66/s  (0.418s, 2448.27/s)  LR: 3.200e-01  Data: 0.028 (0.039)
Train: 4 [ 150/156 ( 97%)]  Loss: 6.91 (6.90)  Time: 0.405s, 2531.31/s  (0.416s, 2464.05/s)  LR: 3.200e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.460 (1.460)  Loss:   6.816 ( 6.816)  Acc@1:   0.781 (  0.781)  Acc@5:   2.246 (  2.246)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.796 ( 6.816)  Acc@1:   0.472 (  0.610)  Acc@5:   3.774 (  2.592)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

Train: 5 [   0/156 (  1%)]  Loss: 6.90 (6.90)  Time: 1.594s,  642.44/s  (1.594s,  642.44/s)  LR: 3.989e-01  Data: 1.221 (1.221)
Train: 5 [  50/156 ( 33%)]  Loss: 6.90 (6.89)  Time: 0.411s, 2492.72/s  (0.429s, 2384.24/s)  LR: 3.989e-01  Data: 0.027 (0.050)
Train: 5 [ 100/156 ( 65%)]  Loss: 6.89 (6.89)  Time: 0.413s, 2478.90/s  (0.420s, 2440.73/s)  LR: 3.989e-01  Data: 0.027 (0.039)
Train: 5 [ 150/156 ( 97%)]  Loss: 6.89 (6.89)  Time: 0.413s, 2477.50/s  (0.417s, 2457.90/s)  LR: 3.989e-01  Data: 0.027 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.475 (1.475)  Loss:   6.795 ( 6.795)  Acc@1:   0.879 (  0.879)  Acc@5:   3.418 (  3.418)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   6.778 ( 6.795)  Acc@1:   0.708 (  0.852)  Acc@5:   3.656 (  3.102)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

Train: 6 [   0/156 (  1%)]  Loss: 6.88 (6.88)  Time: 1.529s,  669.67/s  (1.529s,  669.67/s)  LR: 3.984e-01  Data: 1.141 (1.141)
Train: 6 [  50/156 ( 33%)]  Loss: 6.89 (6.88)  Time: 0.410s, 2496.54/s  (0.432s, 2369.04/s)  LR: 3.984e-01  Data: 0.028 (0.049)
Train: 6 [ 100/156 ( 65%)]  Loss: 6.89 (6.89)  Time: 0.407s, 2514.68/s  (0.421s, 2430.64/s)  LR: 3.984e-01  Data: 0.026 (0.038)
Train: 6 [ 150/156 ( 97%)]  Loss: 6.89 (6.89)  Time: 0.408s, 2512.16/s  (0.417s, 2454.22/s)  LR: 3.984e-01  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.487 (1.487)  Loss:   6.770 ( 6.770)  Acc@1:   1.270 (  1.270)  Acc@5:   3.613 (  3.613)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   6.751 ( 6.772)  Acc@1:   0.825 (  1.030)  Acc@5:   4.009 (  3.584)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

Train: 7 [   0/156 (  1%)]  Loss: 6.88 (6.88)  Time: 1.756s,  583.19/s  (1.756s,  583.19/s)  LR: 3.979e-01  Data: 1.382 (1.382)
Train: 7 [  50/156 ( 33%)]  Loss: 6.88 (6.88)  Time: 0.406s, 2521.25/s  (0.433s, 2363.23/s)  LR: 3.979e-01  Data: 0.027 (0.053)
Train: 7 [ 100/156 ( 65%)]  Loss: 6.89 (6.88)  Time: 0.410s, 2495.37/s  (0.421s, 2429.58/s)  LR: 3.979e-01  Data: 0.027 (0.040)
Train: 7 [ 150/156 ( 97%)]  Loss: 6.89 (6.88)  Time: 0.402s, 2549.53/s  (0.417s, 2456.90/s)  LR: 3.979e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.436 (1.436)  Loss:   6.752 ( 6.752)  Acc@1:   1.074 (  1.074)  Acc@5:   4.297 (  4.297)
Test: [  48/48]  Time: 0.090 (0.329)  Loss:   6.726 ( 6.749)  Acc@1:   0.943 (  1.072)  Acc@5:   3.774 (  3.896)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

Train: 8 [   0/156 (  1%)]  Loss: 6.88 (6.88)  Time: 1.572s,  651.57/s  (1.572s,  651.57/s)  LR: 3.972e-01  Data: 1.159 (1.159)
Train: 8 [  50/156 ( 33%)]  Loss: 6.87 (6.87)  Time: 0.402s, 2545.18/s  (0.425s, 2412.22/s)  LR: 3.972e-01  Data: 0.028 (0.049)
Train: 8 [ 100/156 ( 65%)]  Loss: 6.88 (6.88)  Time: 0.403s, 2542.36/s  (0.414s, 2474.21/s)  LR: 3.972e-01  Data: 0.026 (0.038)
Train: 8 [ 150/156 ( 97%)]  Loss: 6.87 (6.88)  Time: 0.405s, 2529.21/s  (0.411s, 2491.28/s)  LR: 3.972e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.431 (1.431)  Loss:   6.734 ( 6.734)  Acc@1:   0.977 (  0.977)  Acc@5:   4.785 (  4.785)
Test: [  48/48]  Time: 0.092 (0.327)  Loss:   6.716 ( 6.735)  Acc@1:   1.179 (  1.216)  Acc@5:   5.189 (  4.512)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

Train: 9 [   0/156 (  1%)]  Loss: 6.87 (6.87)  Time: 1.745s,  586.69/s  (1.745s,  586.69/s)  LR: 3.965e-01  Data: 1.372 (1.372)
Train: 9 [  50/156 ( 33%)]  Loss: 6.87 (6.87)  Time: 0.411s, 2491.04/s  (0.436s, 2350.94/s)  LR: 3.965e-01  Data: 0.026 (0.054)
Train: 9 [ 100/156 ( 65%)]  Loss: 6.88 (6.87)  Time: 0.413s, 2480.64/s  (0.422s, 2425.50/s)  LR: 3.965e-01  Data: 0.026 (0.041)
Train: 9 [ 150/156 ( 97%)]  Loss: 6.89 (6.87)  Time: 0.403s, 2538.58/s  (0.417s, 2457.55/s)  LR: 3.965e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.489 (1.489)  Loss:   6.715 ( 6.715)  Acc@1:   1.270 (  1.270)  Acc@5:   4.688 (  4.688)
Test: [  48/48]  Time: 0.091 (0.328)  Loss:   6.693 ( 6.715)  Acc@1:   1.769 (  1.292)  Acc@5:   5.307 (  4.400)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-0.pth.tar', 0.07599999996662139)

Train: 10 [   0/156 (  1%)]  Loss: 6.86 (6.86)  Time: 1.619s,  632.56/s  (1.619s,  632.56/s)  LR: 3.956e-01  Data: 1.165 (1.165)
Train: 10 [  50/156 ( 33%)]  Loss: 6.87 (6.87)  Time: 0.407s, 2518.77/s  (0.431s, 2378.45/s)  LR: 3.956e-01  Data: 0.027 (0.050)
Train: 10 [ 100/156 ( 65%)]  Loss: 6.87 (6.87)  Time: 0.418s, 2449.82/s  (0.420s, 2436.14/s)  LR: 3.956e-01  Data: 0.035 (0.039)
Train: 10 [ 150/156 ( 97%)]  Loss: 6.87 (6.87)  Time: 0.408s, 2507.98/s  (0.417s, 2455.33/s)  LR: 3.956e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.501 (1.501)  Loss:   6.688 ( 6.688)  Acc@1:   0.977 (  0.977)  Acc@5:   5.566 (  5.566)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.664 ( 6.688)  Acc@1:   1.415 (  1.418)  Acc@5:   5.189 (  4.940)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-1.pth.tar', 0.14399999958038331)

Train: 11 [   0/156 (  1%)]  Loss: 6.87 (6.87)  Time: 1.578s,  648.80/s  (1.578s,  648.80/s)  LR: 3.947e-01  Data: 1.204 (1.204)
Train: 11 [  50/156 ( 33%)]  Loss: 6.88 (6.86)  Time: 0.416s, 2463.61/s  (0.432s, 2370.60/s)  LR: 3.947e-01  Data: 0.033 (0.050)
Train: 11 [ 100/156 ( 65%)]  Loss: 6.87 (6.86)  Time: 0.413s, 2478.52/s  (0.422s, 2428.76/s)  LR: 3.947e-01  Data: 0.027 (0.039)
Train: 11 [ 150/156 ( 97%)]  Loss: 6.87 (6.87)  Time: 0.406s, 2522.87/s  (0.418s, 2449.53/s)  LR: 3.947e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.409 (1.409)  Loss:   6.689 ( 6.689)  Acc@1:   1.855 (  1.855)  Acc@5:   5.469 (  5.469)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.672 ( 6.692)  Acc@1:   1.769 (  1.686)  Acc@5:   5.660 (  5.476)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-2.pth.tar', 0.4419999998664856)

Train: 12 [   0/156 (  1%)]  Loss: 6.84 (6.84)  Time: 1.684s,  608.25/s  (1.684s,  608.25/s)  LR: 3.937e-01  Data: 1.311 (1.311)
Train: 12 [  50/156 ( 33%)]  Loss: 6.87 (6.86)  Time: 0.411s, 2488.55/s  (0.432s, 2370.26/s)  LR: 3.937e-01  Data: 0.034 (0.052)
Train: 12 [ 100/156 ( 65%)]  Loss: 6.87 (6.86)  Time: 0.411s, 2489.95/s  (0.422s, 2428.88/s)  LR: 3.937e-01  Data: 0.026 (0.040)
Train: 12 [ 150/156 ( 97%)]  Loss: 6.86 (6.86)  Time: 0.410s, 2497.92/s  (0.418s, 2446.94/s)  LR: 3.937e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.436 (1.436)  Loss:   6.661 ( 6.661)  Acc@1:   1.270 (  1.270)  Acc@5:   4.395 (  4.395)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.638 ( 6.657)  Acc@1:   1.533 (  1.540)  Acc@5:   5.542 (  5.192)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-12.pth.tar', 1.5399999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-3.pth.tar', 0.6040000001525879)

Train: 13 [   0/156 (  1%)]  Loss: 6.84 (6.84)  Time: 1.819s,  562.99/s  (1.819s,  562.99/s)  LR: 3.926e-01  Data: 1.445 (1.445)
Train: 13 [  50/156 ( 33%)]  Loss: 6.85 (6.85)  Time: 0.408s, 2508.58/s  (0.434s, 2358.33/s)  LR: 3.926e-01  Data: 0.027 (0.055)
Train: 13 [ 100/156 ( 65%)]  Loss: 6.85 (6.85)  Time: 0.410s, 2496.35/s  (0.422s, 2426.85/s)  LR: 3.926e-01  Data: 0.029 (0.041)
Train: 13 [ 150/156 ( 97%)]  Loss: 6.87 (6.86)  Time: 0.407s, 2513.56/s  (0.419s, 2444.20/s)  LR: 3.926e-01  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.473 (1.473)  Loss:   6.654 ( 6.654)  Acc@1:   1.270 (  1.270)  Acc@5:   6.152 (  6.152)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.629 ( 6.649)  Acc@1:   1.651 (  1.626)  Acc@5:   5.660 (  5.668)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-13.pth.tar', 1.6260000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-12.pth.tar', 1.5399999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-4.pth.tar', 0.6099999998664856)

Train: 14 [   0/156 (  1%)]  Loss: 6.84 (6.84)  Time: 1.570s,  652.06/s  (1.570s,  652.06/s)  LR: 3.915e-01  Data: 1.142 (1.142)
Train: 14 [  50/156 ( 33%)]  Loss: 6.84 (6.85)  Time: 0.405s, 2527.22/s  (0.428s, 2391.38/s)  LR: 3.915e-01  Data: 0.028 (0.049)
Train: 14 [ 100/156 ( 65%)]  Loss: 6.83 (6.85)  Time: 0.407s, 2518.76/s  (0.417s, 2453.06/s)  LR: 3.915e-01  Data: 0.026 (0.038)
Train: 14 [ 150/156 ( 97%)]  Loss: 6.84 (6.85)  Time: 0.409s, 2503.44/s  (0.415s, 2469.50/s)  LR: 3.915e-01  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.439 (1.439)  Loss:   6.633 ( 6.633)  Acc@1:   1.660 (  1.660)  Acc@5:   6.934 (  6.934)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   6.623 ( 6.641)  Acc@1:   1.651 (  1.946)  Acc@5:   6.486 (  6.250)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-13.pth.tar', 1.6260000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-12.pth.tar', 1.5399999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-5.pth.tar', 0.8520000003051758)

Train: 15 [   0/156 (  1%)]  Loss: 6.84 (6.84)  Time: 1.720s,  595.39/s  (1.720s,  595.39/s)  LR: 3.902e-01  Data: 1.282 (1.282)
Train: 15 [  50/156 ( 33%)]  Loss: 6.85 (6.84)  Time: 0.414s, 2475.68/s  (0.439s, 2332.48/s)  LR: 3.902e-01  Data: 0.027 (0.052)
Train: 15 [ 100/156 ( 65%)]  Loss: 6.87 (6.85)  Time: 0.413s, 2479.15/s  (0.426s, 2404.87/s)  LR: 3.902e-01  Data: 0.027 (0.039)
Train: 15 [ 150/156 ( 97%)]  Loss: 6.85 (6.85)  Time: 0.414s, 2473.03/s  (0.421s, 2429.53/s)  LR: 3.902e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.495 (1.495)  Loss:   6.625 ( 6.625)  Acc@1:   1.660 (  1.660)  Acc@5:   5.859 (  5.859)
Test: [  48/48]  Time: 0.092 (0.331)  Loss:   6.614 ( 6.630)  Acc@1:   1.887 (  1.924)  Acc@5:   5.425 (  6.318)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-15.pth.tar', 1.9240000035095215)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-13.pth.tar', 1.6260000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-12.pth.tar', 1.5399999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-6.pth.tar', 1.0299999990081787)

Train: 16 [   0/156 (  1%)]  Loss: 6.83 (6.83)  Time: 1.649s,  620.94/s  (1.649s,  620.94/s)  LR: 3.889e-01  Data: 1.190 (1.190)
Train: 16 [  50/156 ( 33%)]  Loss: 6.83 (6.84)  Time: 0.408s, 2508.07/s  (0.436s, 2346.67/s)  LR: 3.889e-01  Data: 0.027 (0.050)
Train: 16 [ 100/156 ( 65%)]  Loss: 6.85 (6.84)  Time: 0.414s, 2475.08/s  (0.423s, 2423.14/s)  LR: 3.889e-01  Data: 0.029 (0.039)
Train: 16 [ 150/156 ( 97%)]  Loss: 6.86 (6.84)  Time: 0.405s, 2525.91/s  (0.418s, 2452.12/s)  LR: 3.889e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.674 (1.674)  Loss:   6.622 ( 6.622)  Acc@1:   1.758 (  1.758)  Acc@5:   6.348 (  6.348)
Test: [  48/48]  Time: 0.093 (0.330)  Loss:   6.610 ( 6.623)  Acc@1:   2.241 (  2.062)  Acc@5:   6.368 (  6.502)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-15.pth.tar', 1.9240000035095215)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-13.pth.tar', 1.6260000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-12.pth.tar', 1.5399999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-7.pth.tar', 1.0719999997329712)

Train: 17 [   0/156 (  1%)]  Loss: 6.85 (6.85)  Time: 1.542s,  663.90/s  (1.542s,  663.90/s)  LR: 3.875e-01  Data: 1.164 (1.164)
Train: 17 [  50/156 ( 33%)]  Loss: 6.85 (6.83)  Time: 0.409s, 2502.85/s  (0.434s, 2358.38/s)  LR: 3.875e-01  Data: 0.026 (0.050)
Train: 17 [ 100/156 ( 65%)]  Loss: 6.85 (6.84)  Time: 0.406s, 2522.03/s  (0.421s, 2433.81/s)  LR: 3.875e-01  Data: 0.028 (0.039)
Train: 17 [ 150/156 ( 97%)]  Loss: 6.86 (6.84)  Time: 0.404s, 2532.96/s  (0.416s, 2460.99/s)  LR: 3.875e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.420 (1.420)  Loss:   6.608 ( 6.608)  Acc@1:   1.465 (  1.465)  Acc@5:   6.445 (  6.445)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   6.597 ( 6.610)  Acc@1:   2.005 (  1.936)  Acc@5:   6.722 (  6.720)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-17.pth.tar', 1.9360000022125243)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-15.pth.tar', 1.9240000035095215)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-13.pth.tar', 1.6260000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-12.pth.tar', 1.5399999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-8.pth.tar', 1.2159999991607666)

Train: 18 [   0/156 (  1%)]  Loss: 6.83 (6.83)  Time: 1.674s,  611.59/s  (1.674s,  611.59/s)  LR: 3.860e-01  Data: 1.106 (1.106)
Train: 18 [  50/156 ( 33%)]  Loss: 6.83 (6.83)  Time: 0.416s, 2460.60/s  (0.437s, 2342.84/s)  LR: 3.860e-01  Data: 0.027 (0.048)
Train: 18 [ 100/156 ( 65%)]  Loss: 6.85 (6.83)  Time: 0.410s, 2498.05/s  (0.425s, 2409.82/s)  LR: 3.860e-01  Data: 0.026 (0.038)
Train: 18 [ 150/156 ( 97%)]  Loss: 6.85 (6.83)  Time: 0.403s, 2539.79/s  (0.419s, 2443.00/s)  LR: 3.860e-01  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.430 (1.430)  Loss:   6.600 ( 6.600)  Acc@1:   1.953 (  1.953)  Acc@5:   6.836 (  6.836)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.599 ( 6.603)  Acc@1:   1.415 (  2.060)  Acc@5:   5.896 (  6.710)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-18.pth.tar', 2.0600000006103514)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-17.pth.tar', 1.9360000022125243)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-15.pth.tar', 1.9240000035095215)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-13.pth.tar', 1.6260000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-12.pth.tar', 1.5399999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-9.pth.tar', 1.2920000007629395)

Train: 19 [   0/156 (  1%)]  Loss: 6.82 (6.82)  Time: 1.844s,  555.25/s  (1.844s,  555.25/s)  LR: 3.844e-01  Data: 1.150 (1.150)
Train: 19 [  50/156 ( 33%)]  Loss: 6.84 (6.82)  Time: 0.408s, 2511.74/s  (0.434s, 2358.11/s)  LR: 3.844e-01  Data: 0.027 (0.049)
Train: 19 [ 100/156 ( 65%)]  Loss: 6.83 (6.83)  Time: 0.411s, 2493.06/s  (0.422s, 2429.32/s)  LR: 3.844e-01  Data: 0.026 (0.038)
Train: 19 [ 150/156 ( 97%)]  Loss: 6.83 (6.83)  Time: 0.409s, 2501.58/s  (0.419s, 2446.15/s)  LR: 3.844e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.430 (1.430)  Loss:   6.561 ( 6.561)  Acc@1:   2.246 (  2.246)  Acc@5:   6.348 (  6.348)
Test: [  48/48]  Time: 0.092 (0.328)  Loss:   6.559 ( 6.570)  Acc@1:   2.123 (  2.418)  Acc@5:   7.311 (  7.300)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-18.pth.tar', 2.0600000006103514)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-17.pth.tar', 1.9360000022125243)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-15.pth.tar', 1.9240000035095215)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-13.pth.tar', 1.6260000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-12.pth.tar', 1.5399999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-10.pth.tar', 1.4180000006103515)

Train: 20 [   0/156 (  1%)]  Loss: 6.81 (6.81)  Time: 2.059s,  497.27/s  (2.059s,  497.27/s)  LR: 3.827e-01  Data: 1.416 (1.416)
Train: 20 [  50/156 ( 33%)]  Loss: 6.82 (6.82)  Time: 0.410s, 2498.45/s  (0.445s, 2299.33/s)  LR: 3.827e-01  Data: 0.027 (0.054)
Train: 20 [ 100/156 ( 65%)]  Loss: 6.82 (6.82)  Time: 0.413s, 2478.66/s  (0.429s, 2385.55/s)  LR: 3.827e-01  Data: 0.027 (0.041)
Train: 20 [ 150/156 ( 97%)]  Loss: 6.87 (6.82)  Time: 0.411s, 2493.49/s  (0.424s, 2417.28/s)  LR: 3.827e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.436 (1.436)  Loss:   6.566 ( 6.566)  Acc@1:   1.953 (  1.953)  Acc@5:   6.250 (  6.250)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.559 ( 6.572)  Acc@1:   2.594 (  2.376)  Acc@5:   6.958 (  7.254)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-20.pth.tar', 2.375999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-18.pth.tar', 2.0600000006103514)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-17.pth.tar', 1.9360000022125243)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-15.pth.tar', 1.9240000035095215)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-13.pth.tar', 1.6260000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-12.pth.tar', 1.5399999993133544)

Train: 21 [   0/156 (  1%)]  Loss: 6.83 (6.83)  Time: 1.671s,  612.84/s  (1.671s,  612.84/s)  LR: 3.810e-01  Data: 1.294 (1.294)
Train: 21 [  50/156 ( 33%)]  Loss: 6.82 (6.81)  Time: 0.412s, 2483.36/s  (0.435s, 2352.70/s)  LR: 3.810e-01  Data: 0.028 (0.052)
Train: 21 [ 100/156 ( 65%)]  Loss: 6.82 (6.82)  Time: 0.410s, 2496.05/s  (0.425s, 2411.91/s)  LR: 3.810e-01  Data: 0.028 (0.040)
Train: 21 [ 150/156 ( 97%)]  Loss: 6.81 (6.82)  Time: 0.403s, 2539.55/s  (0.419s, 2443.59/s)  LR: 3.810e-01  Data: 0.024 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.417 (1.417)  Loss:   6.542 ( 6.542)  Acc@1:   2.441 (  2.441)  Acc@5:   7.422 (  7.422)
Test: [  48/48]  Time: 0.090 (0.329)  Loss:   6.536 ( 6.555)  Acc@1:   1.533 (  2.482)  Acc@5:   8.137 (  7.656)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-20.pth.tar', 2.375999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-18.pth.tar', 2.0600000006103514)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-17.pth.tar', 1.9360000022125243)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-15.pth.tar', 1.9240000035095215)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-13.pth.tar', 1.6260000020599366)

Train: 22 [   0/156 (  1%)]  Loss: 6.80 (6.80)  Time: 1.856s,  551.64/s  (1.856s,  551.64/s)  LR: 3.791e-01  Data: 1.485 (1.485)
Train: 22 [  50/156 ( 33%)]  Loss: 6.80 (6.80)  Time: 0.401s, 2551.13/s  (0.432s, 2371.84/s)  LR: 3.791e-01  Data: 0.026 (0.056)
Train: 22 [ 100/156 ( 65%)]  Loss: 6.81 (6.81)  Time: 0.408s, 2507.00/s  (0.419s, 2444.86/s)  LR: 3.791e-01  Data: 0.028 (0.042)
Train: 22 [ 150/156 ( 97%)]  Loss: 6.84 (6.81)  Time: 0.408s, 2509.34/s  (0.415s, 2466.40/s)  LR: 3.791e-01  Data: 0.025 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.444 (1.444)  Loss:   6.542 ( 6.542)  Acc@1:   2.344 (  2.344)  Acc@5:   7.422 (  7.422)
Test: [  48/48]  Time: 0.092 (0.327)  Loss:   6.533 ( 6.543)  Acc@1:   2.358 (  2.416)  Acc@5:   8.019 (  7.594)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-22.pth.tar', 2.4160000023651125)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-20.pth.tar', 2.375999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-18.pth.tar', 2.0600000006103514)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-17.pth.tar', 1.9360000022125243)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-15.pth.tar', 1.9240000035095215)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-11.pth.tar', 1.6860000007629394)

Train: 23 [   0/156 (  1%)]  Loss: 6.78 (6.78)  Time: 1.648s,  621.42/s  (1.648s,  621.42/s)  LR: 3.772e-01  Data: 1.271 (1.271)
Train: 23 [  50/156 ( 33%)]  Loss: 6.79 (6.80)  Time: 0.414s, 2471.02/s  (0.439s, 2333.28/s)  LR: 3.772e-01  Data: 0.028 (0.053)
Train: 23 [ 100/156 ( 65%)]  Loss: 6.82 (6.80)  Time: 0.411s, 2491.71/s  (0.426s, 2405.27/s)  LR: 3.772e-01  Data: 0.026 (0.040)
Train: 23 [ 150/156 ( 97%)]  Loss: 6.82 (6.81)  Time: 0.409s, 2505.16/s  (0.421s, 2429.89/s)  LR: 3.772e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.448 (1.448)  Loss:   6.523 ( 6.523)  Acc@1:   2.344 (  2.344)  Acc@5:   8.301 (  8.301)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   6.516 ( 6.529)  Acc@1:   2.476 (  2.632)  Acc@5:   8.373 (  8.196)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-22.pth.tar', 2.4160000023651125)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-20.pth.tar', 2.375999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-18.pth.tar', 2.0600000006103514)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-17.pth.tar', 1.9360000022125243)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-15.pth.tar', 1.9240000035095215)

Train: 24 [   0/156 (  1%)]  Loss: 6.80 (6.80)  Time: 1.654s,  619.04/s  (1.654s,  619.04/s)  LR: 3.753e-01  Data: 1.277 (1.277)
Train: 24 [  50/156 ( 33%)]  Loss: 6.78 (6.79)  Time: 0.412s, 2484.53/s  (0.436s, 2346.02/s)  LR: 3.753e-01  Data: 0.029 (0.052)
Train: 24 [ 100/156 ( 65%)]  Loss: 6.83 (6.80)  Time: 0.415s, 2470.16/s  (0.425s, 2407.24/s)  LR: 3.753e-01  Data: 0.027 (0.040)
Train: 24 [ 150/156 ( 97%)]  Loss: 6.80 (6.80)  Time: 0.411s, 2489.97/s  (0.421s, 2433.49/s)  LR: 3.753e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.429 (1.429)  Loss:   6.525 ( 6.525)  Acc@1:   2.246 (  2.246)  Acc@5:   7.031 (  7.031)
Test: [  48/48]  Time: 0.092 (0.328)  Loss:   6.513 ( 6.521)  Acc@1:   2.712 (  2.680)  Acc@5:   7.547 (  8.072)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-22.pth.tar', 2.4160000023651125)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-20.pth.tar', 2.375999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-18.pth.tar', 2.0600000006103514)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-17.pth.tar', 1.9360000022125243)

Train: 25 [   0/156 (  1%)]  Loss: 6.81 (6.81)  Time: 1.526s,  670.86/s  (1.526s,  670.86/s)  LR: 3.732e-01  Data: 1.149 (1.149)
Train: 25 [  50/156 ( 33%)]  Loss: 6.79 (6.79)  Time: 0.411s, 2491.70/s  (0.434s, 2358.98/s)  LR: 3.732e-01  Data: 0.028 (0.050)
Train: 25 [ 100/156 ( 65%)]  Loss: 6.78 (6.79)  Time: 0.414s, 2473.99/s  (0.423s, 2420.69/s)  LR: 3.732e-01  Data: 0.027 (0.039)
Train: 25 [ 150/156 ( 97%)]  Loss: 6.82 (6.80)  Time: 0.410s, 2497.29/s  (0.420s, 2438.48/s)  LR: 3.732e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.437 (1.437)  Loss:   6.493 ( 6.493)  Acc@1:   2.148 (  2.148)  Acc@5:   7.617 (  7.617)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.474 ( 6.494)  Acc@1:   4.009 (  2.690)  Acc@5:   9.080 (  8.356)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-22.pth.tar', 2.4160000023651125)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-20.pth.tar', 2.375999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-18.pth.tar', 2.0600000006103514)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-14.pth.tar', 1.9460000020599366)

Train: 26 [   0/156 (  1%)]  Loss: 6.77 (6.77)  Time: 1.832s,  559.02/s  (1.832s,  559.02/s)  LR: 3.711e-01  Data: 1.276 (1.276)
Train: 26 [  50/156 ( 33%)]  Loss: 6.80 (6.78)  Time: 0.414s, 2471.29/s  (0.440s, 2328.87/s)  LR: 3.711e-01  Data: 0.027 (0.052)
Train: 26 [ 100/156 ( 65%)]  Loss: 6.78 (6.78)  Time: 0.411s, 2491.13/s  (0.426s, 2403.09/s)  LR: 3.711e-01  Data: 0.027 (0.040)
Train: 26 [ 150/156 ( 97%)]  Loss: 6.82 (6.79)  Time: 0.414s, 2475.04/s  (0.422s, 2428.52/s)  LR: 3.711e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.435 (1.435)  Loss:   6.494 ( 6.494)  Acc@1:   3.320 (  3.320)  Acc@5:   8.887 (  8.887)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.483 ( 6.495)  Acc@1:   3.184 (  2.966)  Acc@5:   8.608 (  8.542)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-22.pth.tar', 2.4160000023651125)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-20.pth.tar', 2.375999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-18.pth.tar', 2.0600000006103514)

Train: 27 [   0/156 (  1%)]  Loss: 6.77 (6.77)  Time: 1.558s,  657.42/s  (1.558s,  657.42/s)  LR: 3.689e-01  Data: 1.183 (1.183)
Train: 27 [  50/156 ( 33%)]  Loss: 6.75 (6.77)  Time: 0.410s, 2496.82/s  (0.432s, 2371.53/s)  LR: 3.689e-01  Data: 0.028 (0.050)
Train: 27 [ 100/156 ( 65%)]  Loss: 6.79 (6.78)  Time: 0.412s, 2482.64/s  (0.423s, 2422.01/s)  LR: 3.689e-01  Data: 0.028 (0.039)
Train: 27 [ 150/156 ( 97%)]  Loss: 6.79 (6.78)  Time: 0.411s, 2491.06/s  (0.420s, 2440.95/s)  LR: 3.689e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.463 (1.463)  Loss:   6.469 ( 6.469)  Acc@1:   2.539 (  2.539)  Acc@5:   7.910 (  7.910)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   6.435 ( 6.456)  Acc@1:   2.594 (  3.142)  Acc@5:   8.726 (  9.014)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-22.pth.tar', 2.4160000023651125)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-20.pth.tar', 2.375999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-16.pth.tar', 2.0619999996185303)

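Note on the "Current checkpoints:" blocks: each lists (path, top-1 accuracy) pairs sorted best-first, and only the ten best epochs are kept on disk; the lowest-scoring entry drops off as better checkpoints arrive (checkpoint-18, last seen in the epoch-26 list above, is gone by epoch 27). A minimal sketch of that rolling top-k retention, assuming checkpoints are scored by validation Acc@1 (the class and method names are hypothetical, not timm's actual CheckpointSaver API):

import os

class TopKCheckpoints:
    # Keep only the k best checkpoints, ranked by a metric where higher is better.
    def __init__(self, k: int = 10):
        self.k = k
        self.entries = []  # list of (path, metric), maintained best-first

    def update(self, path: str, metric: float) -> None:
        self.entries.append((path, metric))
        self.entries.sort(key=lambda e: e[1], reverse=True)
        # Evict everything beyond the k best and remove its file from disk.
        for stale_path, _ in self.entries[self.k:]:
            if os.path.exists(stale_path):
                os.remove(stale_path)
        del self.entries[self.k:]
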
Train: 28 [   0/156 (  1%)]  Loss: 6.77 (6.77)  Time: 1.794s,  570.79/s  (1.794s,  570.79/s)  LR: 3.666e-01  Data: 1.233 (1.233)
Train: 28 [  50/156 ( 33%)]  Loss: 6.76 (6.76)  Time: 0.409s, 2503.65/s  (0.441s, 2321.59/s)  LR: 3.666e-01  Data: 0.027 (0.051)
Train: 28 [ 100/156 ( 65%)]  Loss: 6.79 (6.77)  Time: 0.405s, 2531.41/s  (0.425s, 2406.67/s)  LR: 3.666e-01  Data: 0.027 (0.039)
Train: 28 [ 150/156 ( 97%)]  Loss: 6.78 (6.78)  Time: 0.401s, 2553.65/s  (0.419s, 2445.38/s)  LR: 3.666e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.433 (1.433)  Loss:   6.467 ( 6.467)  Acc@1:   2.832 (  2.832)  Acc@5:   9.277 (  9.277)
Test: [  48/48]  Time: 0.090 (0.329)  Loss:   6.439 ( 6.462)  Acc@1:   3.302 (  3.178)  Acc@5:   9.316 (  9.046)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-22.pth.tar', 2.4160000023651125)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-20.pth.tar', 2.375999999771118)

Train: 29 [   0/156 (  1%)]  Loss: 6.75 (6.75)  Time: 1.643s,  623.21/s  (1.643s,  623.21/s)  LR: 3.642e-01  Data: 1.272 (1.272)
Train: 29 [  50/156 ( 33%)]  Loss: 6.76 (6.75)  Time: 0.407s, 2515.73/s  (0.430s, 2380.51/s)  LR: 3.642e-01  Data: 0.027 (0.051)
Train: 29 [ 100/156 ( 65%)]  Loss: 6.77 (6.76)  Time: 0.406s, 2519.95/s  (0.419s, 2442.33/s)  LR: 3.642e-01  Data: 0.026 (0.040)
Train: 29 [ 150/156 ( 97%)]  Loss: 6.78 (6.77)  Time: 0.410s, 2496.23/s  (0.416s, 2459.26/s)  LR: 3.642e-01  Data: 0.024 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.451 (1.451)  Loss:   6.429 ( 6.429)  Acc@1:   2.344 (  2.344)  Acc@5:   9.277 (  9.277)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   6.420 ( 6.434)  Acc@1:   3.892 (  3.182)  Acc@5:   9.434 (  9.046)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-22.pth.tar', 2.4160000023651125)

Train: 30 [   0/156 (  1%)]  Loss: 6.73 (6.73)  Time: 1.897s,  539.68/s  (1.897s,  539.68/s)  LR: 3.618e-01  Data: 1.519 (1.519)
Train: 30 [  50/156 ( 33%)]  Loss: 6.76 (6.74)  Time: 0.405s, 2529.15/s  (0.439s, 2330.83/s)  LR: 3.618e-01  Data: 0.027 (0.057)
Train: 30 [ 100/156 ( 65%)]  Loss: 6.75 (6.75)  Time: 0.403s, 2543.46/s  (0.422s, 2425.25/s)  LR: 3.618e-01  Data: 0.027 (0.042)
Train: 30 [ 150/156 ( 97%)]  Loss: 6.79 (6.76)  Time: 0.400s, 2562.87/s  (0.416s, 2462.25/s)  LR: 3.618e-01  Data: 0.025 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.445 (1.445)  Loss:   6.446 ( 6.446)  Acc@1:   3.809 (  3.809)  Acc@5:   9.863 (  9.863)
Test: [  48/48]  Time: 0.089 (0.328)  Loss:   6.425 ( 6.434)  Acc@1:   3.538 (  3.602)  Acc@5:   9.434 (  9.918)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-19.pth.tar', 2.4180000009155274)

Train: 31 [   0/156 (  1%)]  Loss: 6.70 (6.70)  Time: 2.219s,  461.39/s  (2.219s,  461.39/s)  LR: 3.593e-01  Data: 1.848 (1.848)
Train: 31 [  50/156 ( 33%)]  Loss: 6.77 (6.73)  Time: 0.406s, 2523.48/s  (0.440s, 2327.00/s)  LR: 3.593e-01  Data: 0.027 (0.063)
Train: 31 [ 100/156 ( 65%)]  Loss: 6.76 (6.74)  Time: 0.407s, 2515.51/s  (0.424s, 2415.00/s)  LR: 3.593e-01  Data: 0.027 (0.045)
Train: 31 [ 150/156 ( 97%)]  Loss: 6.79 (6.75)  Time: 0.416s, 2463.61/s  (0.420s, 2439.58/s)  LR: 3.593e-01  Data: 0.026 (0.039)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.461 (1.461)  Loss:   6.426 ( 6.426)  Acc@1:   2.637 (  2.637)  Acc@5:   9.082 (  9.082)
Test: [  48/48]  Time: 0.091 (0.327)  Loss:   6.392 ( 6.413)  Acc@1:   4.363 (  3.484)  Acc@5:   9.906 (  9.730)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-31.pth.tar', 3.4839999964904784)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-21.pth.tar', 2.4819999993133544)

Train: 32 [   0/156 (  1%)]  Loss: 6.70 (6.70)  Time: 1.987s,  515.37/s  (1.987s,  515.37/s)  LR: 3.567e-01  Data: 1.231 (1.231)
Train: 32 [  50/156 ( 33%)]  Loss: 6.73 (6.73)  Time: 0.412s, 2484.77/s  (0.439s, 2335.07/s)  LR: 3.567e-01  Data: 0.026 (0.050)
Train: 32 [ 100/156 ( 65%)]  Loss: 6.74 (6.74)  Time: 0.415s, 2469.72/s  (0.424s, 2414.12/s)  LR: 3.567e-01  Data: 0.027 (0.039)
Train: 32 [ 150/156 ( 97%)]  Loss: 6.72 (6.74)  Time: 0.414s, 2475.36/s  (0.421s, 2433.31/s)  LR: 3.567e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.472 (1.472)  Loss:   6.370 ( 6.370)  Acc@1:   3.418 (  3.418)  Acc@5:  10.547 ( 10.547)
Test: [  48/48]  Time: 0.090 (0.328)  Loss:   6.354 ( 6.363)  Acc@1:   3.774 (  3.674)  Acc@5:  10.024 ( 10.554)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-31.pth.tar', 3.4839999964904784)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-23.pth.tar', 2.632000001068115)

Train: 33 [   0/156 (  1%)]  Loss: 6.71 (6.71)  Time: 1.815s,  564.06/s  (1.815s,  564.06/s)  LR: 3.541e-01  Data: 1.443 (1.443)
Train: 33 [  50/156 ( 33%)]  Loss: 6.73 (6.72)  Time: 0.404s, 2537.38/s  (0.433s, 2364.45/s)  LR: 3.541e-01  Data: 0.026 (0.055)
Train: 33 [ 100/156 ( 65%)]  Loss: 6.73 (6.72)  Time: 0.412s, 2486.63/s  (0.420s, 2437.14/s)  LR: 3.541e-01  Data: 0.027 (0.041)
Train: 33 [ 150/156 ( 97%)]  Loss: 6.73 (6.73)  Time: 0.410s, 2500.56/s  (0.417s, 2456.61/s)  LR: 3.541e-01  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.451 (1.451)  Loss:   6.395 ( 6.395)  Acc@1:   3.320 (  3.320)  Acc@5:   9.375 (  9.375)
Test: [  48/48]  Time: 0.093 (0.330)  Loss:   6.337 ( 6.371)  Acc@1:   3.892 (  3.560)  Acc@5:  11.321 ( 10.138)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-33.pth.tar', 3.5600000016784668)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-31.pth.tar', 3.4839999964904784)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-24.pth.tar', 2.679999998474121)

Train: 34 [   0/156 (  1%)]  Loss: 6.70 (6.70)  Time: 1.742s,  587.86/s  (1.742s,  587.86/s)  LR: 3.514e-01  Data: 1.151 (1.151)
Train: 34 [  50/156 ( 33%)]  Loss: 6.69 (6.70)  Time: 0.414s, 2472.22/s  (0.438s, 2335.39/s)  LR: 3.514e-01  Data: 0.029 (0.049)
Train: 34 [ 100/156 ( 65%)]  Loss: 6.72 (6.71)  Time: 0.411s, 2489.36/s  (0.423s, 2418.53/s)  LR: 3.514e-01  Data: 0.028 (0.038)
Train: 34 [ 150/156 ( 97%)]  Loss: 6.74 (6.72)  Time: 0.406s, 2520.50/s  (0.418s, 2449.01/s)  LR: 3.514e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.438 (1.438)  Loss:   6.369 ( 6.369)  Acc@1:   3.516 (  3.516)  Acc@5:  11.133 ( 11.133)
Test: [  48/48]  Time: 0.090 (0.328)  Loss:   6.322 ( 6.360)  Acc@1:   3.892 (  3.686)  Acc@5:  10.024 ( 10.340)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-33.pth.tar', 3.5600000016784668)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-31.pth.tar', 3.4839999964904784)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-25.pth.tar', 2.6900000044250487)

Train: 35 [   0/156 (  1%)]  Loss: 6.69 (6.69)  Time: 2.171s,  471.70/s  (2.171s,  471.70/s)  LR: 3.486e-01  Data: 1.797 (1.797)
Train: 35 [  50/156 ( 33%)]  Loss: 6.72 (6.69)  Time: 0.410s, 2498.20/s  (0.444s, 2306.53/s)  LR: 3.486e-01  Data: 0.027 (0.062)
Train: 35 [ 100/156 ( 65%)]  Loss: 6.72 (6.70)  Time: 0.419s, 2446.15/s  (0.429s, 2386.45/s)  LR: 3.486e-01  Data: 0.028 (0.045)
Train: 35 [ 150/156 ( 97%)]  Loss: 6.77 (6.71)  Time: 0.406s, 2521.43/s  (0.423s, 2418.61/s)  LR: 3.486e-01  Data: 0.025 (0.039)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.420 (1.420)  Loss:   6.354 ( 6.354)  Acc@1:   3.809 (  3.809)  Acc@5:  10.742 ( 10.742)
Test: [  48/48]  Time: 0.090 (0.328)  Loss:   6.319 ( 6.355)  Acc@1:   3.420 (  3.820)  Acc@5:  10.967 ( 10.650)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-33.pth.tar', 3.5600000016784668)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-31.pth.tar', 3.4839999964904784)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-26.pth.tar', 2.9660000054168703)

Train: 36 [   0/156 (  1%)]  Loss: 6.66 (6.66)  Time: 1.512s,  677.05/s  (1.512s,  677.05/s)  LR: 3.458e-01  Data: 1.139 (1.139)
Train: 36 [  50/156 ( 33%)]  Loss: 6.68 (6.68)  Time: 0.406s, 2522.10/s  (0.427s, 2396.44/s)  LR: 3.458e-01  Data: 0.028 (0.049)
Train: 36 [ 100/156 ( 65%)]  Loss: 6.68 (6.69)  Time: 0.411s, 2494.41/s  (0.418s, 2451.89/s)  LR: 3.458e-01  Data: 0.027 (0.038)
Train: 36 [ 150/156 ( 97%)]  Loss: 6.71 (6.70)  Time: 0.409s, 2502.37/s  (0.416s, 2464.06/s)  LR: 3.458e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.418 (1.418)  Loss:   6.323 ( 6.323)  Acc@1:   4.004 (  4.004)  Acc@5:  12.207 ( 12.207)
Test: [  48/48]  Time: 0.091 (0.328)  Loss:   6.302 ( 6.342)  Acc@1:   4.835 (  4.232)  Acc@5:  11.085 ( 11.252)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-33.pth.tar', 3.5600000016784668)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-31.pth.tar', 3.4839999964904784)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-27.pth.tar', 3.141999999771118)

Train: 37 [   0/156 (  1%)]  Loss: 6.67 (6.67)  Time: 1.427s,  717.51/s  (1.427s,  717.51/s)  LR: 3.429e-01  Data: 1.050 (1.050)
Train: 37 [  50/156 ( 33%)]  Loss: 6.71 (6.67)  Time: 0.418s, 2452.38/s  (0.433s, 2362.36/s)  LR: 3.429e-01  Data: 0.027 (0.047)
Train: 37 [ 100/156 ( 65%)]  Loss: 6.73 (6.68)  Time: 0.408s, 2510.48/s  (0.423s, 2420.45/s)  LR: 3.429e-01  Data: 0.028 (0.037)
Train: 37 [ 150/156 ( 97%)]  Loss: 6.72 (6.69)  Time: 0.407s, 2514.95/s  (0.418s, 2447.59/s)  LR: 3.429e-01  Data: 0.025 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.462 (1.462)  Loss:   6.366 ( 6.366)  Acc@1:   3.809 (  3.809)  Acc@5:  11.035 ( 11.035)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.332 ( 6.365)  Acc@1:   4.363 (  4.254)  Acc@5:  12.264 ( 11.340)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-33.pth.tar', 3.5600000016784668)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-31.pth.tar', 3.4839999964904784)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-28.pth.tar', 3.1779999960327148)

Train: 38 [   0/156 (  1%)]  Loss: 6.67 (6.67)  Time: 1.883s,  543.87/s  (1.883s,  543.87/s)  LR: 3.399e-01  Data: 1.507 (1.507)
Train: 38 [  50/156 ( 33%)]  Loss: 6.69 (6.66)  Time: 0.414s, 2475.85/s  (0.441s, 2322.36/s)  LR: 3.399e-01  Data: 0.027 (0.056)
Train: 38 [ 100/156 ( 65%)]  Loss: 6.68 (6.67)  Time: 0.417s, 2456.06/s  (0.428s, 2393.18/s)  LR: 3.399e-01  Data: 0.028 (0.042)
Train: 38 [ 150/156 ( 97%)]  Loss: 6.74 (6.68)  Time: 0.413s, 2476.82/s  (0.423s, 2418.40/s)  LR: 3.399e-01  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.457 (1.457)  Loss:   6.337 ( 6.337)  Acc@1:   4.297 (  4.297)  Acc@5:  11.621 ( 11.621)
Test: [  48/48]  Time: 0.092 (0.331)  Loss:   6.317 ( 6.335)  Acc@1:   4.599 (  3.984)  Acc@5:  11.321 ( 11.126)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-38.pth.tar', 3.9839999938964845)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-33.pth.tar', 3.5600000016784668)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-31.pth.tar', 3.4839999964904784)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-29.pth.tar', 3.1820000016784666)

Train: 39 [   0/156 (  1%)]  Loss: 6.63 (6.63)  Time: 1.753s,  584.11/s  (1.753s,  584.11/s)  LR: 3.369e-01  Data: 1.377 (1.377)
Train: 39 [  50/156 ( 33%)]  Loss: 6.69 (6.64)  Time: 0.416s, 2464.31/s  (0.442s, 2317.48/s)  LR: 3.369e-01  Data: 0.028 (0.053)
Train: 39 [ 100/156 ( 65%)]  Loss: 6.68 (6.66)  Time: 0.413s, 2480.49/s  (0.428s, 2394.88/s)  LR: 3.369e-01  Data: 0.027 (0.040)
Train: 39 [ 150/156 ( 97%)]  Loss: 6.70 (6.67)  Time: 0.415s, 2468.76/s  (0.423s, 2420.95/s)  LR: 3.369e-01  Data: 0.027 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.418 (1.418)  Loss:   6.322 ( 6.322)  Acc@1:   4.297 (  4.297)  Acc@5:  12.207 ( 12.207)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.274 ( 6.315)  Acc@1:   5.189 (  4.436)  Acc@5:  12.736 ( 11.636)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-38.pth.tar', 3.9839999938964845)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-33.pth.tar', 3.5600000016784668)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-31.pth.tar', 3.4839999964904784)

Train: 40 [   0/156 (  1%)]  Loss: 6.64 (6.64)  Time: 1.599s,  640.21/s  (1.599s,  640.21/s)  LR: 3.338e-01  Data: 1.159 (1.159)
Train: 40 [  50/156 ( 33%)]  Loss: 6.64 (6.63)  Time: 0.409s, 2502.77/s  (0.433s, 2363.06/s)  LR: 3.338e-01  Data: 0.026 (0.050)
Train: 40 [ 100/156 ( 65%)]  Loss: 6.68 (6.64)  Time: 0.412s, 2487.70/s  (0.424s, 2415.10/s)  LR: 3.338e-01  Data: 0.026 (0.039)
Train: 40 [ 150/156 ( 97%)]  Loss: 6.66 (6.65)  Time: 0.409s, 2505.40/s  (0.420s, 2436.53/s)  LR: 3.338e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.458 (1.458)  Loss:   6.329 ( 6.329)  Acc@1:   4.395 (  4.395)  Acc@5:  12.500 ( 12.500)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   6.301 ( 6.345)  Acc@1:   4.009 (  3.868)  Acc@5:  11.910 ( 10.654)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-38.pth.tar', 3.9839999938964845)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-40.pth.tar', 3.8680000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-33.pth.tar', 3.5600000016784668)

Train: 41 [   0/156 (  1%)]  Loss: 6.58 (6.58)  Time: 1.598s,  640.71/s  (1.598s,  640.71/s)  LR: 3.307e-01  Data: 1.221 (1.221)
Train: 41 [  50/156 ( 33%)]  Loss: 6.66 (6.61)  Time: 0.417s, 2455.60/s  (0.443s, 2313.96/s)  LR: 3.307e-01  Data: 0.027 (0.051)
Train: 41 [ 100/156 ( 65%)]  Loss: 6.64 (6.63)  Time: 0.413s, 2480.86/s  (0.427s, 2396.40/s)  LR: 3.307e-01  Data: 0.028 (0.039)
Train: 41 [ 150/156 ( 97%)]  Loss: 6.68 (6.64)  Time: 0.414s, 2476.18/s  (0.423s, 2422.49/s)  LR: 3.307e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.429 (1.429)  Loss:   6.292 ( 6.292)  Acc@1:   4.883 (  4.883)  Acc@5:  11.719 ( 11.719)
Test: [  48/48]  Time: 0.092 (0.331)  Loss:   6.276 ( 6.297)  Acc@1:   5.896 (  4.546)  Acc@5:  12.028 ( 11.798)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-38.pth.tar', 3.9839999938964845)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-40.pth.tar', 3.8680000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-30.pth.tar', 3.6019999974822996)

Train: 42 [   0/156 (  1%)]  Loss: 6.58 (6.58)  Time: 1.730s,  591.78/s  (1.730s,  591.78/s)  LR: 3.275e-01  Data: 1.352 (1.352)
Train: 42 [  50/156 ( 33%)]  Loss: 6.61 (6.60)  Time: 0.411s, 2490.25/s  (0.439s, 2331.23/s)  LR: 3.275e-01  Data: 0.027 (0.053)
Train: 42 [ 100/156 ( 65%)]  Loss: 6.63 (6.62)  Time: 0.415s, 2466.70/s  (0.426s, 2404.50/s)  LR: 3.275e-01  Data: 0.027 (0.041)
Train: 42 [ 150/156 ( 97%)]  Loss: 6.66 (6.63)  Time: 0.415s, 2467.16/s  (0.422s, 2429.34/s)  LR: 3.275e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.448 (1.448)  Loss:   6.271 ( 6.271)  Acc@1:   3.809 (  3.809)  Acc@5:  12.305 ( 12.305)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   6.269 ( 6.299)  Acc@1:   4.363 (  4.672)  Acc@5:  12.854 ( 11.968)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-38.pth.tar', 3.9839999938964845)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-40.pth.tar', 3.8680000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-32.pth.tar', 3.674000002975464)

Train: 43 [   0/156 (  1%)]  Loss: 6.57 (6.57)  Time: 1.584s,  646.27/s  (1.584s,  646.27/s)  LR: 3.242e-01  Data: 1.173 (1.173)
Train: 43 [  50/156 ( 33%)]  Loss: 6.61 (6.59)  Time: 0.414s, 2473.78/s  (0.435s, 2351.87/s)  LR: 3.242e-01  Data: 0.028 (0.050)
Train: 43 [ 100/156 ( 65%)]  Loss: 6.63 (6.60)  Time: 0.407s, 2516.96/s  (0.424s, 2415.39/s)  LR: 3.242e-01  Data: 0.027 (0.039)
Train: 43 [ 150/156 ( 97%)]  Loss: 6.67 (6.62)  Time: 0.406s, 2525.14/s  (0.419s, 2442.58/s)  LR: 3.242e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.420 (1.420)  Loss:   6.259 ( 6.259)  Acc@1:   4.297 (  4.297)  Acc@5:  12.988 ( 12.988)
Test: [  48/48]  Time: 0.092 (0.331)  Loss:   6.242 ( 6.292)  Acc@1:   5.307 (  4.536)  Acc@5:  12.500 ( 11.976)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-43.pth.tar', 4.5360000022888185)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-38.pth.tar', 3.9839999938964845)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-40.pth.tar', 3.8680000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-34.pth.tar', 3.6860000016784666)

Train: 44 [   0/156 (  1%)]  Loss: 6.55 (6.55)  Time: 1.668s,  614.00/s  (1.668s,  614.00/s)  LR: 3.209e-01  Data: 1.129 (1.129)
Train: 44 [  50/156 ( 33%)]  Loss: 6.58 (6.57)  Time: 0.411s, 2489.16/s  (0.437s, 2343.56/s)  LR: 3.209e-01  Data: 0.026 (0.049)
Train: 44 [ 100/156 ( 65%)]  Loss: 6.61 (6.59)  Time: 0.411s, 2490.90/s  (0.424s, 2415.12/s)  LR: 3.209e-01  Data: 0.027 (0.038)
Train: 44 [ 150/156 ( 97%)]  Loss: 6.61 (6.60)  Time: 0.411s, 2493.17/s  (0.421s, 2434.86/s)  LR: 3.209e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.577 (1.577)  Loss:   6.237 ( 6.237)  Acc@1:   4.590 (  4.590)  Acc@5:  13.086 ( 13.086)
Test: [  48/48]  Time: 0.092 (0.328)  Loss:   6.257 ( 6.276)  Acc@1:   5.307 (  4.670)  Acc@5:  12.972 ( 11.926)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-44.pth.tar', 4.670000002288818)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-43.pth.tar', 4.5360000022888185)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-38.pth.tar', 3.9839999938964845)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-40.pth.tar', 3.8680000044250487)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-35.pth.tar', 3.8199999987792967)

Train: 45 [   0/156 (  1%)]  Loss: 6.55 (6.55)  Time: 1.678s,  610.14/s  (1.678s,  610.14/s)  LR: 3.176e-01  Data: 1.303 (1.303)
Train: 45 [  50/156 ( 33%)]  Loss: 6.59 (6.55)  Time: 0.413s, 2480.61/s  (0.434s, 2357.57/s)  LR: 3.176e-01  Data: 0.028 (0.052)
Train: 45 [ 100/156 ( 65%)]  Loss: 6.63 (6.57)  Time: 0.416s, 2460.70/s  (0.424s, 2413.22/s)  LR: 3.176e-01  Data: 0.027 (0.040)
Train: 45 [ 150/156 ( 97%)]  Loss: 6.65 (6.59)  Time: 0.408s, 2509.03/s  (0.420s, 2438.72/s)  LR: 3.176e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.440 (1.440)  Loss:   6.238 ( 6.238)  Acc@1:   4.395 (  4.395)  Acc@5:  13.867 ( 13.867)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.207 ( 6.279)  Acc@1:   5.778 (  4.708)  Acc@5:  13.561 ( 12.208)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-44.pth.tar', 4.670000002288818)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-43.pth.tar', 4.5360000022888185)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-38.pth.tar', 3.9839999938964845)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-40.pth.tar', 3.8680000044250487)

Train: 46 [   0/156 (  1%)]  Loss: 6.53 (6.53)  Time: 1.727s,  592.85/s  (1.727s,  592.85/s)  LR: 3.141e-01  Data: 1.350 (1.350)
Train: 46 [  50/156 ( 33%)]  Loss: 6.58 (6.54)  Time: 0.416s, 2461.14/s  (0.437s, 2341.21/s)  LR: 3.141e-01  Data: 0.026 (0.053)
Train: 46 [ 100/156 ( 65%)]  Loss: 6.59 (6.56)  Time: 0.410s, 2495.27/s  (0.425s, 2410.27/s)  LR: 3.141e-01  Data: 0.027 (0.040)
Train: 46 [ 150/156 ( 97%)]  Loss: 6.58 (6.57)  Time: 0.415s, 2468.02/s  (0.421s, 2431.46/s)  LR: 3.141e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.443 (1.443)  Loss:   6.234 ( 6.234)  Acc@1:   4.883 (  4.883)  Acc@5:  13.770 ( 13.770)
Test: [  48/48]  Time: 0.091 (0.332)  Loss:   6.195 ( 6.262)  Acc@1:   5.778 (  5.116)  Acc@5:  12.972 ( 12.648)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-44.pth.tar', 4.670000002288818)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-43.pth.tar', 4.5360000022888185)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-38.pth.tar', 3.9839999938964845)

Train: 47 [   0/156 (  1%)]  Loss: 6.49 (6.49)  Time: 1.584s,  646.27/s  (1.584s,  646.27/s)  LR: 3.107e-01  Data: 1.208 (1.208)
Train: 47 [  50/156 ( 33%)]  Loss: 6.56 (6.52)  Time: 0.419s, 2442.80/s  (0.436s, 2349.25/s)  LR: 3.107e-01  Data: 0.033 (0.051)
Train: 47 [ 100/156 ( 65%)]  Loss: 6.57 (6.54)  Time: 0.407s, 2515.97/s  (0.424s, 2414.24/s)  LR: 3.107e-01  Data: 0.026 (0.039)
Train: 47 [ 150/156 ( 97%)]  Loss: 6.62 (6.56)  Time: 0.402s, 2547.58/s  (0.418s, 2448.86/s)  LR: 3.107e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.453 (1.453)  Loss:   6.257 ( 6.257)  Acc@1:   5.371 (  5.371)  Acc@5:  14.062 ( 14.062)
Test: [  48/48]  Time: 0.090 (0.331)  Loss:   6.245 ( 6.284)  Acc@1:   4.835 (  4.870)  Acc@5:  13.090 ( 12.236)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-47.pth.tar', 4.870000007476807)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-44.pth.tar', 4.670000002288818)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-43.pth.tar', 4.5360000022888185)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-36.pth.tar', 4.231999999389648)

Train: 48 [   0/156 (  1%)]  Loss: 6.48 (6.48)  Time: 1.734s,  590.54/s  (1.734s,  590.54/s)  LR: 3.072e-01  Data: 1.361 (1.361)
Train: 48 [  50/156 ( 33%)]  Loss: 6.51 (6.51)  Time: 0.405s, 2525.72/s  (0.431s, 2373.64/s)  LR: 3.072e-01  Data: 0.025 (0.053)
Train: 48 [ 100/156 ( 65%)]  Loss: 6.51 (6.52)  Time: 0.411s, 2489.47/s  (0.420s, 2435.93/s)  LR: 3.072e-01  Data: 0.028 (0.040)
Train: 48 [ 150/156 ( 97%)]  Loss: 6.57 (6.54)  Time: 0.414s, 2474.67/s  (0.418s, 2451.71/s)  LR: 3.072e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.455 (1.455)  Loss:   6.209 ( 6.209)  Acc@1:   4.297 (  4.297)  Acc@5:  13.770 ( 13.770)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.186 ( 6.238)  Acc@1:   6.250 (  5.118)  Acc@5:  12.972 ( 12.608)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-47.pth.tar', 4.870000007476807)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-44.pth.tar', 4.670000002288818)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-43.pth.tar', 4.5360000022888185)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-37.pth.tar', 4.2540000045776365)

Train: 49 [   0/156 (  1%)]  Loss: 6.45 (6.45)  Time: 1.722s,  594.81/s  (1.722s,  594.81/s)  LR: 3.036e-01  Data: 1.160 (1.160)
Train: 49 [  50/156 ( 33%)]  Loss: 6.52 (6.49)  Time: 0.408s, 2509.32/s  (0.433s, 2364.69/s)  LR: 3.036e-01  Data: 0.027 (0.049)
Train: 49 [ 100/156 ( 65%)]  Loss: 6.59 (6.51)  Time: 0.417s, 2456.84/s  (0.422s, 2427.38/s)  LR: 3.036e-01  Data: 0.028 (0.038)
Train: 49 [ 150/156 ( 97%)]  Loss: 6.51 (6.52)  Time: 0.408s, 2512.46/s  (0.419s, 2443.93/s)  LR: 3.036e-01  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.445 (1.445)  Loss:   6.283 ( 6.283)  Acc@1:   5.176 (  5.176)  Acc@5:  12.500 ( 12.500)
Test: [  48/48]  Time: 0.090 (0.332)  Loss:   6.220 ( 6.293)  Acc@1:   5.542 (  5.008)  Acc@5:  15.094 ( 12.258)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-49.pth.tar', 5.007999999694825)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-47.pth.tar', 4.870000007476807)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-44.pth.tar', 4.670000002288818)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-43.pth.tar', 4.5360000022888185)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-39.pth.tar', 4.436000007629395)

Train: 50 [   0/156 (  1%)]  Loss: 6.45 (6.45)  Time: 1.714s,  597.54/s  (1.714s,  597.54/s)  LR: 3.000e-01  Data: 1.184 (1.184)
Train: 50 [  50/156 ( 33%)]  Loss: 6.50 (6.47)  Time: 0.405s, 2526.62/s  (0.432s, 2371.54/s)  LR: 3.000e-01  Data: 0.027 (0.050)
Train: 50 [ 100/156 ( 65%)]  Loss: 6.51 (6.49)  Time: 0.409s, 2504.20/s  (0.420s, 2438.29/s)  LR: 3.000e-01  Data: 0.026 (0.039)
Train: 50 [ 150/156 ( 97%)]  Loss: 6.54 (6.51)  Time: 0.414s, 2473.51/s  (0.417s, 2453.86/s)  LR: 3.000e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.426 (1.426)  Loss:   6.260 ( 6.260)  Acc@1:   4.395 (  4.395)  Acc@5:  12.793 ( 12.793)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.251 ( 6.275)  Acc@1:   4.835 (  5.138)  Acc@5:  12.854 ( 12.606)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-49.pth.tar', 5.007999999694825)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-47.pth.tar', 4.870000007476807)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-44.pth.tar', 4.670000002288818)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-43.pth.tar', 4.5360000022888185)

Train: 51 [   0/156 (  1%)]  Loss: 6.45 (6.45)  Time: 1.756s,  583.01/s  (1.756s,  583.01/s)  LR: 2.964e-01  Data: 1.379 (1.379)
Train: 51 [  50/156 ( 33%)]  Loss: 6.42 (6.44)  Time: 0.412s, 2483.69/s  (0.440s, 2329.65/s)  LR: 2.964e-01  Data: 0.026 (0.054)
Train: 51 [ 100/156 ( 65%)]  Loss: 6.53 (6.47)  Time: 0.413s, 2481.03/s  (0.427s, 2400.12/s)  LR: 2.964e-01  Data: 0.027 (0.041)
Train: 51 [ 150/156 ( 97%)]  Loss: 6.55 (6.49)  Time: 0.415s, 2465.39/s  (0.422s, 2424.77/s)  LR: 2.964e-01  Data: 0.027 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.425 (1.425)  Loss:   6.281 ( 6.281)  Acc@1:   5.469 (  5.469)  Acc@5:  13.672 ( 13.672)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.231 ( 6.271)  Acc@1:   4.599 (  5.026)  Acc@5:  13.325 ( 12.458)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-51.pth.tar', 5.026000001983642)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-49.pth.tar', 5.007999999694825)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-47.pth.tar', 4.870000007476807)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-44.pth.tar', 4.670000002288818)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-41.pth.tar', 4.546000007934571)

Train: 52 [   0/156 (  1%)]  Loss: 6.38 (6.38)  Time: 1.536s,  666.81/s  (1.536s,  666.81/s)  LR: 2.927e-01  Data: 1.160 (1.160)
Train: 52 [  50/156 ( 33%)]  Loss: 6.46 (6.43)  Time: 0.412s, 2486.43/s  (0.433s, 2362.49/s)  LR: 2.927e-01  Data: 0.028 (0.050)
Train: 52 [ 100/156 ( 65%)]  Loss: 6.48 (6.45)  Time: 0.409s, 2505.41/s  (0.423s, 2418.32/s)  LR: 2.927e-01  Data: 0.027 (0.039)
Train: 52 [ 150/156 ( 97%)]  Loss: 6.53 (6.47)  Time: 0.410s, 2495.35/s  (0.419s, 2441.42/s)  LR: 2.927e-01  Data: 0.023 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.461 (1.461)  Loss:   6.236 ( 6.236)  Acc@1:   5.859 (  5.859)  Acc@5:  13.281 ( 13.281)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.235 ( 6.274)  Acc@1:   4.245 (  5.038)  Acc@5:  13.325 ( 12.286)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-52.pth.tar', 5.038000001831055)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-51.pth.tar', 5.026000001983642)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-49.pth.tar', 5.007999999694825)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-47.pth.tar', 4.870000007476807)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-44.pth.tar', 4.670000002288818)

Train: 53 [   0/156 (  1%)]  Loss: 6.38 (6.38)  Time: 1.719s,  595.80/s  (1.719s,  595.80/s)  LR: 2.889e-01  Data: 1.342 (1.342)
Train: 53 [  50/156 ( 33%)]  Loss: 6.48 (6.41)  Time: 0.413s, 2479.77/s  (0.437s, 2341.59/s)  LR: 2.889e-01  Data: 0.027 (0.053)
Train: 53 [ 100/156 ( 65%)]  Loss: 6.53 (6.44)  Time: 0.414s, 2474.85/s  (0.425s, 2408.54/s)  LR: 2.889e-01  Data: 0.027 (0.040)
Train: 53 [ 150/156 ( 97%)]  Loss: 6.54 (6.45)  Time: 0.415s, 2465.70/s  (0.421s, 2431.73/s)  LR: 2.889e-01  Data: 0.027 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.444 (1.444)  Loss:   6.186 ( 6.186)  Acc@1:   6.934 (  6.934)  Acc@5:  15.625 ( 15.625)
Test: [  48/48]  Time: 0.092 (0.332)  Loss:   6.186 ( 6.213)  Acc@1:   4.953 (  5.474)  Acc@5:  13.915 ( 13.238)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-52.pth.tar', 5.038000001831055)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-51.pth.tar', 5.026000001983642)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-49.pth.tar', 5.007999999694825)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-47.pth.tar', 4.870000007476807)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-42.pth.tar', 4.672000004577637)

Train: 54 [   0/156 (  1%)]  Loss: 6.40 (6.40)  Time: 1.570s,  652.35/s  (1.570s,  652.35/s)  LR: 2.852e-01  Data: 1.193 (1.193)
Train: 54 [  50/156 ( 33%)]  Loss: 6.38 (6.38)  Time: 0.413s, 2476.64/s  (0.435s, 2355.76/s)  LR: 2.852e-01  Data: 0.032 (0.050)
Train: 54 [ 100/156 ( 65%)]  Loss: 6.45 (6.41)  Time: 0.415s, 2468.00/s  (0.424s, 2416.28/s)  LR: 2.852e-01  Data: 0.028 (0.039)
Train: 54 [ 150/156 ( 97%)]  Loss: 6.50 (6.43)  Time: 0.408s, 2509.27/s  (0.420s, 2437.24/s)  LR: 2.852e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.471 (1.471)  Loss:   6.204 ( 6.204)  Acc@1:   5.762 (  5.762)  Acc@5:  14.062 ( 14.062)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.175 ( 6.244)  Acc@1:   6.368 (  5.446)  Acc@5:  14.623 ( 12.990)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-52.pth.tar', 5.038000001831055)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-51.pth.tar', 5.026000001983642)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-49.pth.tar', 5.007999999694825)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-47.pth.tar', 4.870000007476807)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-45.pth.tar', 4.708000005187988)

Train: 55 [   0/156 (  1%)]  Loss: 6.31 (6.31)  Time: 2.203s,  464.78/s  (2.203s,  464.78/s)  LR: 2.813e-01  Data: 1.827 (1.827)
Train: 55 [  50/156 ( 33%)]  Loss: 6.34 (6.36)  Time: 0.412s, 2487.94/s  (0.446s, 2297.56/s)  LR: 2.813e-01  Data: 0.026 (0.062)
Train: 55 [ 100/156 ( 65%)]  Loss: 6.43 (6.39)  Time: 0.409s, 2502.87/s  (0.429s, 2385.37/s)  LR: 2.813e-01  Data: 0.027 (0.045)
Train: 55 [ 150/156 ( 97%)]  Loss: 6.46 (6.41)  Time: 0.409s, 2502.59/s  (0.424s, 2416.30/s)  LR: 2.813e-01  Data: 0.025 (0.039)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.462 (1.462)  Loss:   6.182 ( 6.182)  Acc@1:   5.762 (  5.762)  Acc@5:  14.941 ( 14.941)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   6.181 ( 6.233)  Acc@1:   5.542 (  5.590)  Acc@5:  14.976 ( 13.178)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-52.pth.tar', 5.038000001831055)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-51.pth.tar', 5.026000001983642)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-49.pth.tar', 5.007999999694825)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-47.pth.tar', 4.870000007476807)

Train: 56 [   0/156 (  1%)]  Loss: 6.33 (6.33)  Time: 1.616s,  633.72/s  (1.616s,  633.72/s)  LR: 2.775e-01  Data: 1.238 (1.238)
Train: 56 [  50/156 ( 33%)]  Loss: 6.41 (6.34)  Time: 0.417s, 2454.96/s  (0.436s, 2350.72/s)  LR: 2.775e-01  Data: 0.028 (0.051)
Train: 56 [ 100/156 ( 65%)]  Loss: 6.44 (6.37)  Time: 0.406s, 2520.15/s  (0.423s, 2421.14/s)  LR: 2.775e-01  Data: 0.026 (0.039)
Train: 56 [ 150/156 ( 97%)]  Loss: 6.46 (6.39)  Time: 0.411s, 2491.53/s  (0.418s, 2447.55/s)  LR: 2.775e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.458 (1.458)  Loss:   6.140 ( 6.140)  Acc@1:   6.348 (  6.348)  Acc@5:  14.844 ( 14.844)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   6.169 ( 6.212)  Acc@1:   5.896 (  5.562)  Acc@5:  13.208 ( 13.226)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-56.pth.tar', 5.562000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-52.pth.tar', 5.038000001831055)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-51.pth.tar', 5.026000001983642)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-49.pth.tar', 5.007999999694825)

Train: 57 [   0/156 (  1%)]  Loss: 6.30 (6.30)  Time: 1.750s,  585.01/s  (1.750s,  585.01/s)  LR: 2.736e-01  Data: 1.373 (1.373)
Train: 57 [  50/156 ( 33%)]  Loss: 6.37 (6.31)  Time: 0.417s, 2457.90/s  (0.440s, 2328.66/s)  LR: 2.736e-01  Data: 0.028 (0.053)
Train: 57 [ 100/156 ( 65%)]  Loss: 6.35 (6.34)  Time: 0.411s, 2490.39/s  (0.425s, 2406.73/s)  LR: 2.736e-01  Data: 0.029 (0.041)
Train: 57 [ 150/156 ( 97%)]  Loss: 6.44 (6.36)  Time: 0.408s, 2508.91/s  (0.420s, 2435.84/s)  LR: 2.736e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.454 (1.454)  Loss:   6.223 ( 6.223)  Acc@1:   5.762 (  5.762)  Acc@5:  13.770 ( 13.770)
Test: [  48/48]  Time: 0.091 (0.332)  Loss:   6.224 ( 6.272)  Acc@1:   5.896 (  5.148)  Acc@5:  12.736 ( 12.336)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-56.pth.tar', 5.562000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-57.pth.tar', 5.147999999847412)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-52.pth.tar', 5.038000001831055)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-51.pth.tar', 5.026000001983642)

Train: 58 [   0/156 (  1%)]  Loss: 6.26 (6.26)  Time: 1.734s,  590.41/s  (1.734s,  590.41/s)  LR: 2.697e-01  Data: 1.358 (1.358)
Train: 58 [  50/156 ( 33%)]  Loss: 6.35 (6.29)  Time: 0.413s, 2477.92/s  (0.437s, 2342.93/s)  LR: 2.697e-01  Data: 0.028 (0.053)
Train: 58 [ 100/156 ( 65%)]  Loss: 6.38 (6.33)  Time: 0.408s, 2512.70/s  (0.424s, 2412.29/s)  LR: 2.697e-01  Data: 0.027 (0.041)
Train: 58 [ 150/156 ( 97%)]  Loss: 6.41 (6.35)  Time: 0.407s, 2518.23/s  (0.419s, 2442.19/s)  LR: 2.697e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.449 (1.449)  Loss:   6.278 ( 6.278)  Acc@1:   5.176 (  5.176)  Acc@5:  13.086 ( 13.086)
Test: [  48/48]  Time: 0.091 (0.331)  Loss:   6.224 ( 6.294)  Acc@1:   6.014 (  5.280)  Acc@5:  13.208 ( 11.980)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-56.pth.tar', 5.562000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-58.pth.tar', 5.280000002593994)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-57.pth.tar', 5.147999999847412)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-52.pth.tar', 5.038000001831055)

Train: 59 [   0/156 (  1%)]  Loss: 6.28 (6.28)  Time: 2.314s,  442.51/s  (2.314s,  442.51/s)  LR: 2.658e-01  Data: 1.142 (1.142)
Train: 59 [  50/156 ( 33%)]  Loss: 6.27 (6.27)  Time: 0.412s, 2486.43/s  (0.449s, 2279.44/s)  LR: 2.658e-01  Data: 0.027 (0.049)
Train: 59 [ 100/156 ( 65%)]  Loss: 6.29 (6.30)  Time: 0.407s, 2513.15/s  (0.429s, 2385.22/s)  LR: 2.658e-01  Data: 0.027 (0.038)
Train: 59 [ 150/156 ( 97%)]  Loss: 6.39 (6.32)  Time: 0.411s, 2494.23/s  (0.423s, 2422.21/s)  LR: 2.658e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.449 (1.449)  Loss:   6.209 ( 6.209)  Acc@1:   6.055 (  6.055)  Acc@5:  14.258 ( 14.258)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   6.197 ( 6.242)  Acc@1:   5.071 (  5.536)  Acc@5:  13.561 ( 12.928)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-56.pth.tar', 5.562000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-59.pth.tar', 5.536000004882813)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-58.pth.tar', 5.280000002593994)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-57.pth.tar', 5.147999999847412)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-46.pth.tar', 5.11599999710083)

Train: 60 [   0/156 (  1%)]  Loss: 6.21 (6.21)  Time: 1.734s,  590.57/s  (1.734s,  590.57/s)  LR: 2.618e-01  Data: 1.125 (1.125)
Train: 60 [  50/156 ( 33%)]  Loss: 6.24 (6.23)  Time: 0.409s, 2506.69/s  (0.438s, 2338.59/s)  LR: 2.618e-01  Data: 0.027 (0.049)
Train: 60 [ 100/156 ( 65%)]  Loss: 6.33 (6.27)  Time: 0.410s, 2497.36/s  (0.424s, 2416.84/s)  LR: 2.618e-01  Data: 0.028 (0.038)
Train: 60 [ 150/156 ( 97%)]  Loss: 6.40 (6.30)  Time: 0.414s, 2474.84/s  (0.420s, 2439.81/s)  LR: 2.618e-01  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.422 (1.422)  Loss:   6.228 ( 6.228)  Acc@1:   6.152 (  6.152)  Acc@5:  14.062 ( 14.062)
Test: [  48/48]  Time: 0.092 (0.331)  Loss:   6.223 ( 6.257)  Acc@1:   5.425 (  5.348)  Acc@5:  12.972 ( 13.058)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-56.pth.tar', 5.562000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-59.pth.tar', 5.536000004882813)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-60.pth.tar', 5.347999996948242)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-58.pth.tar', 5.280000002593994)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-57.pth.tar', 5.147999999847412)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-48.pth.tar', 5.118)

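Every metric on the Train and Test lines is printed as "current (running average)" pairs, e.g. "Loss: 6.20 (6.21)" or "Time: 0.409s (0.438s)". A running-average meter in the style of timm's `AverageMeter` is all the bookkeeping this requires (a minimal sketch):

```python
class AverageMeter:
    """Tracks the 'current (average)' pairs printed for Loss/Time/Data."""
    def __init__(self):
        self.val = 0.0    # most recent value (the first number)
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0    # running mean (the number in parentheses)

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
```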
Train: 61 [   0/156 (  1%)]  Loss: 6.20 (6.20)  Time: 2.073s,  494.06/s  (2.073s,  494.06/s)  LR: 2.578e-01  Data: 1.695 (1.695)
Train: 61 [  50/156 ( 33%)]  Loss: 6.24 (6.21)  Time: 0.411s, 2491.31/s  (0.444s, 2305.58/s)  LR: 2.578e-01  Data: 0.026 (0.060)
Train: 61 [ 100/156 ( 65%)]  Loss: 6.30 (6.25)  Time: 0.410s, 2497.37/s  (0.428s, 2391.19/s)  LR: 2.578e-01  Data: 0.026 (0.044)
Train: 61 [ 150/156 ( 97%)]  Loss: 6.36 (6.27)  Time: 0.411s, 2490.73/s  (0.423s, 2420.67/s)  LR: 2.578e-01  Data: 0.026 (0.038)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.430 (1.430)  Loss:   6.199 ( 6.199)  Acc@1:   6.934 (  6.934)  Acc@5:  13.965 ( 13.965)
Test: [  48/48]  Time: 0.091 (0.328)  Loss:   6.160 ( 6.256)  Acc@1:   6.250 (  5.596)  Acc@5:  13.797 ( 12.852)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-61.pth.tar', 5.596)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-56.pth.tar', 5.562000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-59.pth.tar', 5.536000004882813)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-60.pth.tar', 5.347999996948242)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-58.pth.tar', 5.280000002593994)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-57.pth.tar', 5.147999999847412)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-50.pth.tar', 5.137999999389648)

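The per-step rate on the Train lines is samples per second, i.e. global batch size divided by step time, so the logged numbers let you back out the run configuration. A quick check against the "Train: 61" values above (any steady-state Train line gives the same answer):

```python
# Values from a 'Train: 61' line above: rate = batch_size / step_time.
step_time, rate = 0.411, 2491.31
print(round(step_time * rate))   # -> 1024: the implied global batch size
print(156 * 1024)                # -> 159744 samples/epoch, roughly 1281167 / 8
```

159,744 samples per 156-step epoch is about one eighth of ImageNet-1k's 1,281,167 training images, consistent with the "frac-1over8" run name in the checkpoint paths.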
Train: 62 [   0/156 (  1%)]  Loss: 6.18 (6.18)  Time: 1.509s,  678.37/s  (1.509s,  678.37/s)  LR: 2.538e-01  Data: 1.134 (1.134)
Train: 62 [  50/156 ( 33%)]  Loss: 6.27 (6.18)  Time: 0.413s, 2477.17/s  (0.436s, 2350.81/s)  LR: 2.538e-01  Data: 0.027 (0.049)
Train: 62 [ 100/156 ( 65%)]  Loss: 6.28 (6.22)  Time: 0.414s, 2475.71/s  (0.424s, 2413.53/s)  LR: 2.538e-01  Data: 0.028 (0.039)
Train: 62 [ 150/156 ( 97%)]  Loss: 6.31 (6.25)  Time: 0.411s, 2490.82/s  (0.421s, 2434.97/s)  LR: 2.538e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.425 (1.425)  Loss:   6.168 ( 6.168)  Acc@1:   6.543 (  6.543)  Acc@5:  14.355 ( 14.355)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.189 ( 6.251)  Acc@1:   5.778 (  5.614)  Acc@5:  13.208 ( 12.980)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-62.pth.tar', 5.61399999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-61.pth.tar', 5.596)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-56.pth.tar', 5.562000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-59.pth.tar', 5.536000004882813)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-60.pth.tar', 5.347999996948242)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-58.pth.tar', 5.280000002593994)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-57.pth.tar', 5.147999999847412)

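The LR column decays smoothly epoch to epoch (2.658e-01 at epoch 59, 2.000e-01 at 75, 1.000e-01 at 100, 2.679e-02 at 125), which is consistent with cosine annealing from a base LR of 0.4 over a 150-epoch schedule; both constants are inferred by fitting the logged values, not stated in the log itself. A quick reproduction:

```python
import math

BASE_LR, TOTAL_EPOCHS = 0.4, 150   # assumption: inferred from the LR column

def cosine_lr(epoch: int) -> float:
    # Standard cosine annealing, no warmup or restarts.
    return 0.5 * BASE_LR * (1.0 + math.cos(math.pi * epoch / TOTAL_EPOCHS))

for e in (59, 75, 100, 125):
    print(e, format(cosine_lr(e), '.3e'))   # 2.658e-01, 2.000e-01, 1.000e-01, 2.679e-02
```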
Train: 63 [   0/156 (  1%)]  Loss: 6.10 (6.10)  Time: 1.513s,  676.85/s  (1.513s,  676.85/s)  LR: 2.497e-01  Data: 1.076 (1.076)
Train: 63 [  50/156 ( 33%)]  Loss: 6.24 (6.16)  Time: 0.410s, 2499.81/s  (0.430s, 2379.33/s)  LR: 2.497e-01  Data: 0.028 (0.048)
Train: 63 [ 100/156 ( 65%)]  Loss: 6.25 (6.19)  Time: 0.412s, 2483.22/s  (0.421s, 2432.50/s)  LR: 2.497e-01  Data: 0.028 (0.038)
Train: 63 [ 150/156 ( 97%)]  Loss: 6.28 (6.22)  Time: 0.411s, 2488.48/s  (0.418s, 2447.12/s)  LR: 2.497e-01  Data: 0.024 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.426 (1.426)  Loss:   6.259 ( 6.259)  Acc@1:   5.371 (  5.371)  Acc@5:  12.793 ( 12.793)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.335 ( 6.344)  Acc@1:   5.307 (  5.072)  Acc@5:  11.792 ( 11.760)
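Epoch 63's Acc@1 (5.072) falls below the weakest retained checkpoint (checkpoint-57 at 5.148), which is presumably why no updated "Current checkpoints:" list follows this eval. The Acc@1/Acc@5 columns themselves are standard top-k accuracy over the 49 test batches (indexed 0-48), with the parenthesized value the running average across batches; a minimal sketch (the function name is illustrative):

```python
import torch

def topk_accuracy(output: torch.Tensor, target: torch.Tensor, topk=(1, 5)):
    """Percent of samples whose true label is among the top-k predictions."""
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)   # (B, maxk)
    correct = pred.t().eq(target.view(1, -1))                       # (maxk, B) bools
    n = target.size(0)
    return [correct[:k].reshape(-1).float().sum().item() * 100.0 / n for k in topk]
```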
Train: 64 [   0/156 (  1%)]  Loss: 6.07 (6.07)  Time: 1.718s,  595.87/s  (1.718s,  595.87/s)  LR: 2.457e-01  Data: 1.110 (1.110)
Train: 64 [  50/156 ( 33%)]  Loss: 6.21 (6.12)  Time: 0.412s, 2487.93/s  (0.439s, 2331.06/s)  LR: 2.457e-01  Data: 0.027 (0.048)
Train: 64 [ 100/156 ( 65%)]  Loss: 6.27 (6.16)  Time: 0.412s, 2482.86/s  (0.426s, 2404.88/s)  LR: 2.457e-01  Data: 0.027 (0.038)
Train: 64 [ 150/156 ( 97%)]  Loss: 6.26 (6.19)  Time: 0.411s, 2491.64/s  (0.422s, 2427.47/s)  LR: 2.457e-01  Data: 0.024 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.450 (1.450)  Loss:   6.236 ( 6.236)  Acc@1:   6.445 (  6.445)  Acc@5:  13.477 ( 13.477)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   6.244 ( 6.313)  Acc@1:   5.071 (  5.400)  Acc@5:  13.208 ( 12.266)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-62.pth.tar', 5.61399999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-61.pth.tar', 5.596)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-56.pth.tar', 5.562000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-59.pth.tar', 5.536000004882813)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-64.pth.tar', 5.400000004882813)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-60.pth.tar', 5.347999996948242)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-58.pth.tar', 5.280000002593994)

Train: 65 [   0/156 (  1%)]  Loss: 6.11 (6.11)  Time: 1.694s,  604.51/s  (1.694s,  604.51/s)  LR: 2.416e-01  Data: 1.112 (1.112)
Train: 65 [  50/156 ( 33%)]  Loss: 6.15 (6.10)  Time: 0.414s, 2472.42/s  (0.444s, 2307.18/s)  LR: 2.416e-01  Data: 0.027 (0.055)
Train: 65 [ 100/156 ( 65%)]  Loss: 6.25 (6.14)  Time: 0.412s, 2487.95/s  (0.428s, 2393.91/s)  LR: 2.416e-01  Data: 0.027 (0.041)
Train: 65 [ 150/156 ( 97%)]  Loss: 6.20 (6.16)  Time: 0.410s, 2498.16/s  (0.423s, 2421.98/s)  LR: 2.416e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.558 (1.558)  Loss:   6.212 ( 6.212)  Acc@1:   7.227 (  7.227)  Acc@5:  13.574 ( 13.574)
Test: [  48/48]  Time: 0.092 (0.331)  Loss:   6.214 ( 6.294)  Acc@1:   5.542 (  5.462)  Acc@5:  13.090 ( 12.334)
Current checkpoints:
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-62.pth.tar', 5.61399999710083)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-61.pth.tar', 5.596)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-55.pth.tar', 5.589999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-56.pth.tar', 5.562000007934571)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-59.pth.tar', 5.536000004882813)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-53.pth.tar', 5.4739999940490724)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-65.pth.tar', 5.461999999694824)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-54.pth.tar', 5.446000002746582)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-64.pth.tar', 5.400000004882813)
 ('./output/train/ImageNetTraining80.0-frac-1over8/checkpoint-60.pth.tar', 5.347999996948242)

Train: 66 [   0/156 (  1%)]  Loss: 6.10 (6.10)  Time: 1.606s,  637.72/s  (1.606s,  637.72/s)  LR: 2.375e-01  Data: 1.096 (1.096)
Train: 66 [  50/156 ( 33%)]  Loss: 6.09 (6.06)  Time: 0.409s, 2504.84/s  (0.435s, 2354.14/s)  LR: 2.375e-01  Data: 0.027 (0.048)
Train: 66 [ 100/156 ( 65%)]  Loss: 6.20 (6.11)  Time: 0.408s, 2507.47/s  (0.423s, 2423.41/s)  LR: 2.375e-01  Data: 0.027 (0.038)
Train: 66 [ 150/156 ( 97%)]  Loss: 6.19 (6.14)  Time: 0.409s, 2502.76/s  (0.419s, 2443.46/s)  LR: 2.375e-01  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.416 (1.416)  Loss:   6.242 ( 6.242)  Acc@1:   5.957 (  5.957)  Acc@5:  13.770 ( 13.770)
Test: [  48/48]  Time: 0.092 (0.328)  Loss:   6.245 ( 6.332)  Acc@1:   5.778 (  5.244)  Acc@5:  12.146 ( 11.950)
Train: 67 [   0/156 (  1%)]  Loss: 6.02 (6.02)  Time: 1.493s,  685.65/s  (1.493s,  685.65/s)  LR: 2.334e-01  Data: 1.043 (1.043)
Train: 67 [  50/156 ( 33%)]  Loss: 6.11 (6.04)  Time: 0.422s, 2427.15/s  (0.433s, 2365.36/s)  LR: 2.334e-01  Data: 0.025 (0.047)
Train: 67 [ 100/156 ( 65%)]  Loss: 6.06 (6.08)  Time: 0.408s, 2510.51/s  (0.422s, 2423.95/s)  LR: 2.334e-01  Data: 0.026 (0.037)
Train: 67 [ 150/156 ( 97%)]  Loss: 6.17 (6.11)  Time: 0.407s, 2518.53/s  (0.418s, 2449.59/s)  LR: 2.334e-01  Data: 0.024 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.429 (1.429)  Loss:   6.194 ( 6.194)  Acc@1:   5.859 (  5.859)  Acc@5:  13.965 ( 13.965)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.222 ( 6.330)  Acc@1:   5.896 (  5.070)  Acc@5:  12.736 ( 12.068)
Train: 68 [   0/156 (  1%)]  Loss: 6.02 (6.02)  Time: 1.736s,  589.95/s  (1.736s,  589.95/s)  LR: 2.292e-01  Data: 1.349 (1.349)
Train: 68 [  50/156 ( 33%)]  Loss: 6.11 (6.01)  Time: 0.415s, 2469.25/s  (0.436s, 2346.36/s)  LR: 2.292e-01  Data: 0.027 (0.053)
Train: 68 [ 100/156 ( 65%)]  Loss: 6.09 (6.05)  Time: 0.413s, 2479.93/s  (0.425s, 2410.92/s)  LR: 2.292e-01  Data: 0.026 (0.040)
Train: 68 [ 150/156 ( 97%)]  Loss: 6.16 (6.08)  Time: 0.410s, 2498.46/s  (0.421s, 2433.73/s)  LR: 2.292e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.417 (1.417)  Loss:   6.277 ( 6.277)  Acc@1:   5.371 (  5.371)  Acc@5:  13.086 ( 13.086)
Test: [  48/48]  Time: 0.091 (0.328)  Loss:   6.318 ( 6.354)  Acc@1:   4.953 (  5.276)  Acc@5:  11.675 ( 12.132)
Train: 69 [   0/156 (  1%)]  Loss: 5.97 (5.97)  Time: 1.537s,  666.37/s  (1.537s,  666.37/s)  LR: 2.251e-01  Data: 1.136 (1.136)
Train: 69 [  50/156 ( 33%)]  Loss: 5.98 (5.98)  Time: 0.413s, 2481.45/s  (0.435s, 2356.59/s)  LR: 2.251e-01  Data: 0.027 (0.049)
Train: 69 [ 100/156 ( 65%)]  Loss: 6.14 (6.02)  Time: 0.416s, 2458.63/s  (0.424s, 2415.47/s)  LR: 2.251e-01  Data: 0.028 (0.038)
Train: 69 [ 150/156 ( 97%)]  Loss: 6.22 (6.05)  Time: 0.412s, 2484.40/s  (0.420s, 2438.37/s)  LR: 2.251e-01  Data: 0.024 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.445 (1.445)  Loss:   6.258 ( 6.258)  Acc@1:   5.566 (  5.566)  Acc@5:  13.965 ( 13.965)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   6.273 ( 6.363)  Acc@1:   5.660 (  5.234)  Acc@5:  13.443 ( 12.050)
Train: 70 [   0/156 (  1%)]  Loss: 5.90 (5.90)  Time: 1.487s,  688.54/s  (1.487s,  688.54/s)  LR: 2.209e-01  Data: 1.110 (1.110)
Train: 70 [  50/156 ( 33%)]  Loss: 6.01 (5.94)  Time: 0.408s, 2507.17/s  (0.434s, 2357.68/s)  LR: 2.209e-01  Data: 0.026 (0.049)
Train: 70 [ 100/156 ( 65%)]  Loss: 6.08 (5.98)  Time: 0.411s, 2493.02/s  (0.423s, 2421.84/s)  LR: 2.209e-01  Data: 0.027 (0.038)
Train: 70 [ 150/156 ( 97%)]  Loss: 6.12 (6.02)  Time: 0.407s, 2513.74/s  (0.420s, 2440.55/s)  LR: 2.209e-01  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.464 (1.464)  Loss:   6.299 ( 6.299)  Acc@1:   5.176 (  5.176)  Acc@5:  12.305 ( 12.305)
Test: [  48/48]  Time: 0.091 (0.333)  Loss:   6.328 ( 6.397)  Acc@1:   4.481 (  5.048)  Acc@5:  11.321 ( 11.450)
Train: 71 [   0/156 (  1%)]  Loss: 5.92 (5.92)  Time: 1.960s,  522.48/s  (1.960s,  522.48/s)  LR: 2.167e-01  Data: 1.584 (1.584)
Train: 71 [  50/156 ( 33%)]  Loss: 6.01 (5.91)  Time: 0.411s, 2493.75/s  (0.440s, 2328.74/s)  LR: 2.167e-01  Data: 0.027 (0.058)
Train: 71 [ 100/156 ( 65%)]  Loss: 6.03 (5.95)  Time: 0.406s, 2523.22/s  (0.426s, 2406.17/s)  LR: 2.167e-01  Data: 0.027 (0.043)
Train: 71 [ 150/156 ( 97%)]  Loss: 6.12 (5.99)  Time: 0.405s, 2527.36/s  (0.419s, 2441.39/s)  LR: 2.167e-01  Data: 0.026 (0.038)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.421 (1.421)  Loss:   6.391 ( 6.391)  Acc@1:   4.980 (  4.980)  Acc@5:  13.086 ( 13.086)
Test: [  48/48]  Time: 0.091 (0.327)  Loss:   6.357 ( 6.444)  Acc@1:   6.840 (  5.200)  Acc@5:  13.915 ( 11.624)
Train: 72 [   0/156 (  1%)]  Loss: 5.83 (5.83)  Time: 1.638s,  624.99/s  (1.638s,  624.99/s)  LR: 2.126e-01  Data: 1.263 (1.263)
Train: 72 [  50/156 ( 33%)]  Loss: 5.90 (5.87)  Time: 0.409s, 2501.77/s  (0.432s, 2368.59/s)  LR: 2.126e-01  Data: 0.027 (0.051)
Train: 72 [ 100/156 ( 65%)]  Loss: 6.08 (5.91)  Time: 0.412s, 2484.40/s  (0.422s, 2426.37/s)  LR: 2.126e-01  Data: 0.027 (0.039)
Train: 72 [ 150/156 ( 97%)]  Loss: 6.08 (5.95)  Time: 0.408s, 2510.06/s  (0.419s, 2442.96/s)  LR: 2.126e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.425 (1.425)  Loss:   6.346 ( 6.346)  Acc@1:   5.762 (  5.762)  Acc@5:  12.402 ( 12.402)
Test: [  48/48]  Time: 0.090 (0.329)  Loss:   6.303 ( 6.430)  Acc@1:   4.835 (  4.878)  Acc@5:  12.382 ( 11.388)
Train: 73 [   0/156 (  1%)]  Loss: 5.80 (5.80)  Time: 1.923s,  532.41/s  (1.923s,  532.41/s)  LR: 2.084e-01  Data: 1.419 (1.419)
Train: 73 [  50/156 ( 33%)]  Loss: 5.93 (5.84)  Time: 0.402s, 2546.26/s  (0.432s, 2368.79/s)  LR: 2.084e-01  Data: 0.026 (0.054)
Train: 73 [ 100/156 ( 65%)]  Loss: 5.99 (5.88)  Time: 0.403s, 2543.78/s  (0.417s, 2454.06/s)  LR: 2.084e-01  Data: 0.027 (0.041)
Train: 73 [ 150/156 ( 97%)]  Loss: 6.06 (5.92)  Time: 0.401s, 2550.61/s  (0.412s, 2482.56/s)  LR: 2.084e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.424 (1.424)  Loss:   6.347 ( 6.347)  Acc@1:   5.273 (  5.273)  Acc@5:  11.719 ( 11.719)
Test: [  48/48]  Time: 0.090 (0.331)  Loss:   6.370 ( 6.443)  Acc@1:   5.896 (  4.864)  Acc@5:  12.736 ( 11.266)
Train: 74 [   0/156 (  1%)]  Loss: 5.73 (5.73)  Time: 1.653s,  619.52/s  (1.653s,  619.52/s)  LR: 2.042e-01  Data: 1.280 (1.280)
Train: 74 [  50/156 ( 33%)]  Loss: 5.86 (5.80)  Time: 0.406s, 2523.59/s  (0.431s, 2376.80/s)  LR: 2.042e-01  Data: 0.027 (0.052)
Train: 74 [ 100/156 ( 65%)]  Loss: 5.99 (5.85)  Time: 0.414s, 2472.20/s  (0.420s, 2439.08/s)  LR: 2.042e-01  Data: 0.030 (0.040)
Train: 74 [ 150/156 ( 97%)]  Loss: 5.96 (5.89)  Time: 0.410s, 2497.34/s  (0.417s, 2453.80/s)  LR: 2.042e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.431 (1.431)  Loss:   6.377 ( 6.377)  Acc@1:   5.273 (  5.273)  Acc@5:  11.719 ( 11.719)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   6.420 ( 6.504)  Acc@1:   5.071 (  4.808)  Acc@5:  11.439 ( 11.074)
Train: 75 [   0/156 (  1%)]  Loss: 5.67 (5.67)  Time: 1.616s,  633.72/s  (1.616s,  633.72/s)  LR: 2.000e-01  Data: 1.239 (1.239)
Train: 75 [  50/156 ( 33%)]  Loss: 5.83 (5.76)  Time: 0.410s, 2497.47/s  (0.441s, 2320.22/s)  LR: 2.000e-01  Data: 0.027 (0.056)
Train: 75 [ 100/156 ( 65%)]  Loss: 5.91 (5.81)  Time: 0.405s, 2530.98/s  (0.425s, 2410.66/s)  LR: 2.000e-01  Data: 0.027 (0.042)
Train: 75 [ 150/156 ( 97%)]  Loss: 5.97 (5.85)  Time: 0.405s, 2527.44/s  (0.418s, 2447.23/s)  LR: 2.000e-01  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.455 (1.455)  Loss:   6.348 ( 6.348)  Acc@1:   5.762 (  5.762)  Acc@5:  11.719 ( 11.719)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.425 ( 6.483)  Acc@1:   4.835 (  4.816)  Acc@5:  11.557 ( 11.136)
Train: 76 [   0/156 (  1%)]  Loss: 5.71 (5.71)  Time: 1.892s,  541.25/s  (1.892s,  541.25/s)  LR: 1.958e-01  Data: 1.078 (1.078)
Train: 76 [  50/156 ( 33%)]  Loss: 5.77 (5.70)  Time: 0.408s, 2511.21/s  (0.436s, 2346.11/s)  LR: 1.958e-01  Data: 0.027 (0.047)
Train: 76 [ 100/156 ( 65%)]  Loss: 5.97 (5.77)  Time: 0.411s, 2494.18/s  (0.423s, 2418.12/s)  LR: 1.958e-01  Data: 0.027 (0.037)
Train: 76 [ 150/156 ( 97%)]  Loss: 5.98 (5.81)  Time: 0.411s, 2494.42/s  (0.420s, 2438.81/s)  LR: 1.958e-01  Data: 0.025 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.437 (1.437)  Loss:   6.452 ( 6.452)  Acc@1:   5.078 (  5.078)  Acc@5:  12.012 ( 12.012)
Test: [  48/48]  Time: 0.091 (0.328)  Loss:   6.552 ( 6.602)  Acc@1:   4.717 (  4.350)  Acc@5:  10.967 ( 10.494)
Train: 77 [   0/156 (  1%)]  Loss: 5.73 (5.73)  Time: 1.807s,  566.81/s  (1.807s,  566.81/s)  LR: 1.916e-01  Data: 1.433 (1.433)
Train: 77 [  50/156 ( 33%)]  Loss: 5.65 (5.68)  Time: 0.409s, 2500.77/s  (0.435s, 2355.53/s)  LR: 1.916e-01  Data: 0.027 (0.055)
Train: 77 [ 100/156 ( 65%)]  Loss: 5.85 (5.73)  Time: 0.409s, 2503.12/s  (0.423s, 2422.12/s)  LR: 1.916e-01  Data: 0.027 (0.041)
Train: 77 [ 150/156 ( 97%)]  Loss: 5.89 (5.77)  Time: 0.409s, 2502.04/s  (0.419s, 2441.90/s)  LR: 1.916e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.478 (1.478)  Loss:   6.404 ( 6.404)  Acc@1:   5.371 (  5.371)  Acc@5:  12.500 ( 12.500)
Test: [  48/48]  Time: 0.092 (0.333)  Loss:   6.413 ( 6.528)  Acc@1:   4.717 (  4.746)  Acc@5:  12.028 ( 10.898)
Train: 78 [   0/156 (  1%)]  Loss: 5.67 (5.67)  Time: 1.624s,  630.39/s  (1.624s,  630.39/s)  LR: 1.874e-01  Data: 1.247 (1.247)
Train: 78 [  50/156 ( 33%)]  Loss: 5.69 (5.64)  Time: 0.410s, 2499.80/s  (0.439s, 2332.79/s)  LR: 1.874e-01  Data: 0.028 (0.051)
Train: 78 [ 100/156 ( 65%)]  Loss: 5.77 (5.70)  Time: 0.405s, 2528.26/s  (0.423s, 2419.73/s)  LR: 1.874e-01  Data: 0.027 (0.039)
Train: 78 [ 150/156 ( 97%)]  Loss: 5.79 (5.74)  Time: 0.403s, 2541.44/s  (0.417s, 2454.36/s)  LR: 1.874e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.452 (1.452)  Loss:   6.517 ( 6.517)  Acc@1:   4.980 (  4.980)  Acc@5:  10.938 ( 10.938)
Test: [  48/48]  Time: 0.090 (0.328)  Loss:   6.554 ( 6.622)  Acc@1:   4.953 (  4.544)  Acc@5:  12.028 ( 10.576)
Train: 79 [   0/156 (  1%)]  Loss: 5.54 (5.54)  Time: 1.715s,  597.05/s  (1.715s,  597.05/s)  LR: 1.833e-01  Data: 1.342 (1.342)
Train: 79 [  50/156 ( 33%)]  Loss: 5.69 (5.61)  Time: 0.406s, 2521.56/s  (0.433s, 2366.96/s)  LR: 1.833e-01  Data: 0.026 (0.053)
Train: 79 [ 100/156 ( 65%)]  Loss: 5.79 (5.66)  Time: 0.414s, 2476.01/s  (0.421s, 2430.79/s)  LR: 1.833e-01  Data: 0.032 (0.040)
Train: 79 [ 150/156 ( 97%)]  Loss: 5.91 (5.70)  Time: 0.411s, 2492.11/s  (0.419s, 2444.39/s)  LR: 1.833e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.438 (1.438)  Loss:   6.545 ( 6.545)  Acc@1:   4.492 (  4.492)  Acc@5:  10.254 ( 10.254)
Test: [  48/48]  Time: 0.091 (0.333)  Loss:   6.566 ( 6.615)  Acc@1:   5.071 (  4.456)  Acc@5:  11.910 ( 10.592)
Train: 80 [   0/156 (  1%)]  Loss: 5.55 (5.55)  Time: 1.655s,  618.59/s  (1.655s,  618.59/s)  LR: 1.791e-01  Data: 1.279 (1.279)
Train: 80 [  50/156 ( 33%)]  Loss: 5.68 (5.57)  Time: 0.417s, 2454.37/s  (0.437s, 2343.04/s)  LR: 1.791e-01  Data: 0.026 (0.051)
Train: 80 [ 100/156 ( 65%)]  Loss: 5.77 (5.62)  Time: 0.406s, 2524.09/s  (0.424s, 2417.22/s)  LR: 1.791e-01  Data: 0.025 (0.039)
Train: 80 [ 150/156 ( 97%)]  Loss: 5.80 (5.66)  Time: 0.401s, 2553.02/s  (0.418s, 2452.29/s)  LR: 1.791e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.420 (1.420)  Loss:   6.595 ( 6.595)  Acc@1:   4.688 (  4.688)  Acc@5:  11.133 ( 11.133)
Test: [  48/48]  Time: 0.090 (0.328)  Loss:   6.619 ( 6.681)  Acc@1:   4.127 (  4.576)  Acc@5:  10.377 ( 10.664)
Train: 81 [   0/156 (  1%)]  Loss: 5.47 (5.47)  Time: 1.645s,  622.40/s  (1.645s,  622.40/s)  LR: 1.749e-01  Data: 1.274 (1.274)
Train: 81 [  50/156 ( 33%)]  Loss: 5.59 (5.53)  Time: 0.404s, 2537.14/s  (0.429s, 2387.79/s)  LR: 1.749e-01  Data: 0.025 (0.051)
Train: 81 [ 100/156 ( 65%)]  Loss: 5.66 (5.58)  Time: 0.409s, 2504.41/s  (0.418s, 2447.22/s)  LR: 1.749e-01  Data: 0.027 (0.039)
Train: 81 [ 150/156 ( 97%)]  Loss: 5.77 (5.62)  Time: 0.411s, 2490.25/s  (0.416s, 2461.03/s)  LR: 1.749e-01  Data: 0.024 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.438 (1.438)  Loss:   6.674 ( 6.674)  Acc@1:   3.613 (  3.613)  Acc@5:  10.352 ( 10.352)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   6.740 ( 6.766)  Acc@1:   3.302 (  4.028)  Acc@5:  10.142 (  9.572)
Train: 82 [   0/156 (  1%)]  Loss: 5.47 (5.47)  Time: 1.702s,  601.81/s  (1.702s,  601.81/s)  LR: 1.708e-01  Data: 1.325 (1.325)
Train: 82 [  50/156 ( 33%)]  Loss: 5.62 (5.49)  Time: 0.411s, 2490.22/s  (0.438s, 2337.65/s)  LR: 1.708e-01  Data: 0.027 (0.053)
Train: 82 [ 100/156 ( 65%)]  Loss: 5.58 (5.54)  Time: 0.407s, 2515.99/s  (0.423s, 2420.57/s)  LR: 1.708e-01  Data: 0.026 (0.040)
Train: 82 [ 150/156 ( 97%)]  Loss: 5.69 (5.58)  Time: 0.405s, 2530.02/s  (0.417s, 2453.56/s)  LR: 1.708e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.426 (1.426)  Loss:   6.627 ( 6.627)  Acc@1:   4.590 (  4.590)  Acc@5:  11.133 ( 11.133)
Test: [  48/48]  Time: 0.091 (0.331)  Loss:   6.657 ( 6.741)  Acc@1:   3.892 (  4.244)  Acc@5:  11.321 ( 10.150)
Train: 83 [   0/156 (  1%)]  Loss: 5.31 (5.31)  Time: 1.623s,  630.81/s  (1.623s,  630.81/s)  LR: 1.666e-01  Data: 1.223 (1.223)
Train: 83 [  50/156 ( 33%)]  Loss: 5.57 (5.44)  Time: 0.412s, 2482.69/s  (0.433s, 2366.10/s)  LR: 1.666e-01  Data: 0.029 (0.051)
Train: 83 [ 100/156 ( 65%)]  Loss: 5.59 (5.50)  Time: 0.418s, 2448.70/s  (0.423s, 2422.85/s)  LR: 1.666e-01  Data: 0.028 (0.039)
Train: 83 [ 150/156 ( 97%)]  Loss: 5.65 (5.54)  Time: 0.408s, 2511.18/s  (0.419s, 2441.31/s)  LR: 1.666e-01  Data: 0.027 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.441 (1.441)  Loss:   6.712 ( 6.712)  Acc@1:   4.492 (  4.492)  Acc@5:  10.352 ( 10.352)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.744 ( 6.823)  Acc@1:   4.835 (  4.122)  Acc@5:  10.377 ( 10.032)
Train: 84 [   0/156 (  1%)]  Loss: 5.36 (5.36)  Time: 1.800s,  568.83/s  (1.800s,  568.83/s)  LR: 1.625e-01  Data: 1.423 (1.423)
Train: 84 [  50/156 ( 33%)]  Loss: 5.57 (5.39)  Time: 0.411s, 2490.48/s  (0.440s, 2327.52/s)  LR: 1.625e-01  Data: 0.026 (0.054)
Train: 84 [ 100/156 ( 65%)]  Loss: 5.56 (5.45)  Time: 0.410s, 2494.64/s  (0.426s, 2401.82/s)  LR: 1.625e-01  Data: 0.026 (0.041)
Train: 84 [ 150/156 ( 97%)]  Loss: 5.69 (5.50)  Time: 0.408s, 2510.45/s  (0.422s, 2429.23/s)  LR: 1.625e-01  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.505 (1.505)  Loss:   6.674 ( 6.674)  Acc@1:   3.711 (  3.711)  Acc@5:  10.449 ( 10.449)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   6.728 ( 6.824)  Acc@1:   4.009 (  4.130)  Acc@5:   9.316 (  9.796)
Train: 85 [   0/156 (  1%)]  Loss: 5.28 (5.28)  Time: 1.919s,  533.72/s  (1.919s,  533.72/s)  LR: 1.584e-01  Data: 1.099 (1.099)
Train: 85 [  50/156 ( 33%)]  Loss: 5.45 (5.36)  Time: 0.408s, 2507.99/s  (0.437s, 2345.62/s)  LR: 1.584e-01  Data: 0.028 (0.049)
Train: 85 [ 100/156 ( 65%)]  Loss: 5.50 (5.42)  Time: 0.407s, 2513.48/s  (0.422s, 2428.20/s)  LR: 1.584e-01  Data: 0.027 (0.038)
Train: 85 [ 150/156 ( 97%)]  Loss: 5.65 (5.46)  Time: 0.409s, 2503.89/s  (0.418s, 2450.03/s)  LR: 1.584e-01  Data: 0.025 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.431 (1.431)  Loss:   6.706 ( 6.706)  Acc@1:   4.199 (  4.199)  Acc@5:   9.961 (  9.961)
Test: [  48/48]  Time: 0.092 (0.328)  Loss:   6.802 ( 6.838)  Acc@1:   3.892 (  4.000)  Acc@5:   8.491 (  9.350)
Train: 86 [   0/156 (  1%)]  Loss: 5.26 (5.26)  Time: 1.666s,  614.83/s  (1.666s,  614.83/s)  LR: 1.543e-01  Data: 1.066 (1.066)
Train: 86 [  50/156 ( 33%)]  Loss: 5.46 (5.31)  Time: 0.412s, 2484.06/s  (0.437s, 2344.52/s)  LR: 1.543e-01  Data: 0.028 (0.048)
Train: 86 [ 100/156 ( 65%)]  Loss: 5.57 (5.37)  Time: 0.414s, 2475.48/s  (0.425s, 2409.73/s)  LR: 1.543e-01  Data: 0.027 (0.038)
Train: 86 [ 150/156 ( 97%)]  Loss: 5.64 (5.42)  Time: 0.409s, 2503.93/s  (0.421s, 2433.31/s)  LR: 1.543e-01  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.430 (1.430)  Loss:   6.676 ( 6.676)  Acc@1:   5.273 (  5.273)  Acc@5:  10.938 ( 10.938)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.885 ( 6.874)  Acc@1:   4.009 (  4.344)  Acc@5:  10.024 (  9.868)
Train: 87 [   0/156 (  1%)]  Loss: 5.32 (5.32)  Time: 1.627s,  629.43/s  (1.627s,  629.43/s)  LR: 1.503e-01  Data: 1.140 (1.140)
Train: 87 [  50/156 ( 33%)]  Loss: 5.36 (5.27)  Time: 0.412s, 2487.89/s  (0.436s, 2351.16/s)  LR: 1.503e-01  Data: 0.027 (0.049)
Train: 87 [ 100/156 ( 65%)]  Loss: 5.41 (5.33)  Time: 0.413s, 2479.41/s  (0.424s, 2417.65/s)  LR: 1.503e-01  Data: 0.028 (0.038)
Train: 87 [ 150/156 ( 97%)]  Loss: 5.53 (5.38)  Time: 0.409s, 2502.20/s  (0.420s, 2438.65/s)  LR: 1.503e-01  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.433 (1.433)  Loss:   6.853 ( 6.853)  Acc@1:   4.199 (  4.199)  Acc@5:  10.156 ( 10.156)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   7.009 ( 6.985)  Acc@1:   3.892 (  4.014)  Acc@5:   9.316 (  9.266)
Train: 88 [   0/156 (  1%)]  Loss: 5.17 (5.17)  Time: 1.607s,  637.16/s  (1.607s,  637.16/s)  LR: 1.462e-01  Data: 1.058 (1.058)
Train: 88 [  50/156 ( 33%)]  Loss: 5.33 (5.23)  Time: 0.415s, 2465.79/s  (0.435s, 2353.47/s)  LR: 1.462e-01  Data: 0.029 (0.048)
Train: 88 [ 100/156 ( 65%)]  Loss: 5.45 (5.29)  Time: 0.408s, 2508.28/s  (0.424s, 2415.60/s)  LR: 1.462e-01  Data: 0.026 (0.038)
Train: 88 [ 150/156 ( 97%)]  Loss: 5.54 (5.34)  Time: 0.407s, 2513.15/s  (0.419s, 2442.75/s)  LR: 1.462e-01  Data: 0.027 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.514 (1.514)  Loss:   6.810 ( 6.810)  Acc@1:   4.102 (  4.102)  Acc@5:   9.668 (  9.668)
Test: [  48/48]  Time: 0.091 (0.331)  Loss:   6.931 ( 6.995)  Acc@1:   3.184 (  3.900)  Acc@5:  10.259 (  9.268)
Train: 89 [   0/156 (  1%)]  Loss: 5.17 (5.17)  Time: 1.616s,  633.78/s  (1.616s,  633.78/s)  LR: 1.422e-01  Data: 1.240 (1.240)
Train: 89 [  50/156 ( 33%)]  Loss: 5.24 (5.21)  Time: 0.411s, 2490.81/s  (0.436s, 2349.78/s)  LR: 1.422e-01  Data: 0.027 (0.051)
Train: 89 [ 100/156 ( 65%)]  Loss: 5.36 (5.25)  Time: 0.407s, 2515.17/s  (0.423s, 2419.40/s)  LR: 1.422e-01  Data: 0.028 (0.039)
Train: 89 [ 150/156 ( 97%)]  Loss: 5.38 (5.29)  Time: 0.405s, 2527.62/s  (0.418s, 2450.83/s)  LR: 1.422e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.434 (1.434)  Loss:   6.971 ( 6.971)  Acc@1:   3.223 (  3.223)  Acc@5:   8.301 (  8.301)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   6.968 ( 7.086)  Acc@1:   3.420 (  3.644)  Acc@5:   9.906 (  8.914)
Train: 90 [   0/156 (  1%)]  Loss: 5.16 (5.16)  Time: 1.433s,  714.41/s  (1.433s,  714.41/s)  LR: 1.382e-01  Data: 1.044 (1.044)
Train: 90 [  50/156 ( 33%)]  Loss: 5.22 (5.14)  Time: 0.411s, 2490.41/s  (0.428s, 2389.77/s)  LR: 1.382e-01  Data: 0.027 (0.048)
Train: 90 [ 100/156 ( 65%)]  Loss: 5.28 (5.19)  Time: 0.413s, 2480.82/s  (0.420s, 2438.28/s)  LR: 1.382e-01  Data: 0.026 (0.037)
Train: 90 [ 150/156 ( 97%)]  Loss: 5.23 (5.25)  Time: 0.410s, 2499.44/s  (0.418s, 2451.94/s)  LR: 1.382e-01  Data: 0.025 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.454 (1.454)  Loss:   6.983 ( 6.983)  Acc@1:   3.418 (  3.418)  Acc@5:   8.984 (  8.984)
Test: [  48/48]  Time: 0.092 (0.331)  Loss:   6.916 ( 7.064)  Acc@1:   4.599 (  3.982)  Acc@5:   9.788 (  9.348)
Train: 91 [   0/156 (  1%)]  Loss: 5.04 (5.04)  Time: 1.651s,  620.16/s  (1.651s,  620.16/s)  LR: 1.342e-01  Data: 1.276 (1.276)
Train: 91 [  50/156 ( 33%)]  Loss: 5.06 (5.10)  Time: 0.413s, 2477.93/s  (0.436s, 2347.24/s)  LR: 1.342e-01  Data: 0.028 (0.052)
Train: 91 [ 100/156 ( 65%)]  Loss: 5.40 (5.16)  Time: 0.402s, 2547.95/s  (0.421s, 2431.29/s)  LR: 1.342e-01  Data: 0.027 (0.039)
Train: 91 [ 150/156 ( 97%)]  Loss: 5.36 (5.21)  Time: 0.398s, 2570.49/s  (0.414s, 2471.09/s)  LR: 1.342e-01  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.430 (1.430)  Loss:   6.971 ( 6.971)  Acc@1:   3.809 (  3.809)  Acc@5:  10.352 ( 10.352)
Test: [  48/48]  Time: 0.089 (0.330)  Loss:   7.119 ( 7.174)  Acc@1:   3.774 (  3.786)  Acc@5:   9.080 (  9.094)
Train: 92 [   0/156 (  1%)]  Loss: 5.04 (5.04)  Time: 1.733s,  590.79/s  (1.733s,  590.79/s)  LR: 1.303e-01  Data: 1.368 (1.368)
Train: 92 [  50/156 ( 33%)]  Loss: 5.07 (5.06)  Time: 0.398s, 2571.71/s  (0.425s, 2412.19/s)  LR: 1.303e-01  Data: 0.026 (0.053)
Train: 92 [ 100/156 ( 65%)]  Loss: 5.19 (5.11)  Time: 0.402s, 2549.60/s  (0.412s, 2482.67/s)  LR: 1.303e-01  Data: 0.028 (0.041)
Train: 92 [ 150/156 ( 97%)]  Loss: 5.28 (5.16)  Time: 0.400s, 2562.15/s  (0.409s, 2505.09/s)  LR: 1.303e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.444 (1.444)  Loss:   7.055 ( 7.055)  Acc@1:   3.809 (  3.809)  Acc@5:   9.180 (  9.180)
Test: [  48/48]  Time: 0.090 (0.331)  Loss:   7.176 ( 7.217)  Acc@1:   3.774 (  3.572)  Acc@5:   8.726 (  8.814)
Train: 93 [   0/156 (  1%)]  Loss: 4.87 (4.87)  Time: 1.562s,  655.61/s  (1.562s,  655.61/s)  LR: 1.264e-01  Data: 1.193 (1.193)
Train: 93 [  50/156 ( 33%)]  Loss: 5.07 (5.02)  Time: 0.406s, 2522.26/s  (0.427s, 2396.52/s)  LR: 1.264e-01  Data: 0.026 (0.050)
Train: 93 [ 100/156 ( 65%)]  Loss: 5.13 (5.07)  Time: 0.407s, 2516.72/s  (0.417s, 2455.26/s)  LR: 1.264e-01  Data: 0.027 (0.039)
Train: 93 [ 150/156 ( 97%)]  Loss: 5.31 (5.12)  Time: 0.406s, 2521.07/s  (0.415s, 2470.30/s)  LR: 1.264e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.447 (1.447)  Loss:   7.049 ( 7.049)  Acc@1:   3.906 (  3.906)  Acc@5:   9.863 (  9.863)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   7.159 ( 7.251)  Acc@1:   4.009 (  3.692)  Acc@5:  10.142 (  8.898)
Train: 94 [   0/156 (  1%)]  Loss: 4.91 (4.91)  Time: 1.679s,  609.78/s  (1.679s,  609.78/s)  LR: 1.225e-01  Data: 1.303 (1.303)
Train: 94 [  50/156 ( 33%)]  Loss: 5.08 (4.98)  Time: 0.406s, 2523.87/s  (0.434s, 2358.56/s)  LR: 1.225e-01  Data: 0.027 (0.052)
Train: 94 [ 100/156 ( 65%)]  Loss: 5.06 (5.03)  Time: 0.408s, 2508.40/s  (0.421s, 2429.85/s)  LR: 1.225e-01  Data: 0.027 (0.040)
Train: 94 [ 150/156 ( 97%)]  Loss: 5.23 (5.08)  Time: 0.412s, 2483.55/s  (0.418s, 2448.76/s)  LR: 1.225e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.435 (1.435)  Loss:   6.992 ( 6.992)  Acc@1:   5.273 (  5.273)  Acc@5:  11.133 ( 11.133)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   7.020 ( 7.183)  Acc@1:   4.009 (  3.702)  Acc@5:   9.788 (  8.880)
Train: 95 [   0/156 (  1%)]  Loss: 4.91 (4.91)  Time: 1.691s,  605.68/s  (1.691s,  605.68/s)  LR: 1.187e-01  Data: 1.315 (1.315)
Train: 95 [  50/156 ( 33%)]  Loss: 5.08 (4.93)  Time: 0.411s, 2491.78/s  (0.437s, 2343.62/s)  LR: 1.187e-01  Data: 0.027 (0.052)
Train: 95 [ 100/156 ( 65%)]  Loss: 5.07 (4.99)  Time: 0.409s, 2501.37/s  (0.424s, 2417.59/s)  LR: 1.187e-01  Data: 0.026 (0.040)
Train: 95 [ 150/156 ( 97%)]  Loss: 5.12 (5.03)  Time: 0.410s, 2496.88/s  (0.419s, 2444.43/s)  LR: 1.187e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.454 (1.454)  Loss:   7.191 ( 7.191)  Acc@1:   3.418 (  3.418)  Acc@5:   8.789 (  8.789)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   7.188 ( 7.353)  Acc@1:   3.420 (  3.686)  Acc@5:  10.377 (  8.836)
Train: 96 [   0/156 (  1%)]  Loss: 4.90 (4.90)  Time: 1.682s,  608.85/s  (1.682s,  608.85/s)  LR: 1.148e-01  Data: 1.306 (1.306)
Train: 96 [  50/156 ( 33%)]  Loss: 4.96 (4.88)  Time: 0.409s, 2502.07/s  (0.437s, 2342.70/s)  LR: 1.148e-01  Data: 0.028 (0.053)
Train: 96 [ 100/156 ( 65%)]  Loss: 5.17 (4.94)  Time: 0.404s, 2536.13/s  (0.422s, 2427.17/s)  LR: 1.148e-01  Data: 0.027 (0.040)
Train: 96 [ 150/156 ( 97%)]  Loss: 5.22 (4.99)  Time: 0.403s, 2538.06/s  (0.416s, 2459.30/s)  LR: 1.148e-01  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.435 (1.435)  Loss:   7.166 ( 7.166)  Acc@1:   3.320 (  3.320)  Acc@5:   9.082 (  9.082)
Test: [  48/48]  Time: 0.090 (0.328)  Loss:   7.195 ( 7.336)  Acc@1:   3.892 (  3.622)  Acc@5:   9.552 (  8.524)
Train: 97 [   0/156 (  1%)]  Loss: 4.80 (4.80)  Time: 1.658s,  617.77/s  (1.658s,  617.77/s)  LR: 1.111e-01  Data: 1.284 (1.284)
Train: 97 [  50/156 ( 33%)]  Loss: 4.81 (4.85)  Time: 0.407s, 2514.52/s  (0.432s, 2369.12/s)  LR: 1.111e-01  Data: 0.026 (0.052)
Train: 97 [ 100/156 ( 65%)]  Loss: 5.02 (4.90)  Time: 0.412s, 2488.04/s  (0.422s, 2426.99/s)  LR: 1.111e-01  Data: 0.026 (0.040)
Train: 97 [ 150/156 ( 97%)]  Loss: 5.13 (4.95)  Time: 0.409s, 2502.26/s  (0.419s, 2446.06/s)  LR: 1.111e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.548 (1.548)  Loss:   7.199 ( 7.199)  Acc@1:   4.297 (  4.297)  Acc@5:   9.375 (  9.375)
Test: [  48/48]  Time: 0.092 (0.333)  Loss:   7.240 ( 7.344)  Acc@1:   3.656 (  3.442)  Acc@5:   9.434 (  8.568)
Train: 98 [   0/156 (  1%)]  Loss: 4.74 (4.74)  Time: 1.710s,  598.89/s  (1.710s,  598.89/s)  LR: 1.073e-01  Data: 1.284 (1.284)
Train: 98 [  50/156 ( 33%)]  Loss: 4.87 (4.80)  Time: 0.405s, 2525.90/s  (0.434s, 2356.75/s)  LR: 1.073e-01  Data: 0.027 (0.051)
Train: 98 [ 100/156 ( 65%)]  Loss: 4.97 (4.87)  Time: 0.403s, 2542.02/s  (0.420s, 2438.61/s)  LR: 1.073e-01  Data: 0.027 (0.040)
Train: 98 [ 150/156 ( 97%)]  Loss: 5.00 (4.91)  Time: 0.402s, 2545.47/s  (0.415s, 2469.25/s)  LR: 1.073e-01  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.424 (1.424)  Loss:   7.183 ( 7.183)  Acc@1:   3.711 (  3.711)  Acc@5:  10.254 ( 10.254)
Test: [  48/48]  Time: 0.090 (0.329)  Loss:   7.218 ( 7.334)  Acc@1:   3.302 (  3.472)  Acc@5:   8.608 (  8.536)
Train: 99 [   0/156 (  1%)]  Loss: 4.65 (4.65)  Time: 2.024s,  505.99/s  (2.024s,  505.99/s)  LR: 1.036e-01  Data: 1.651 (1.651)
Train: 99 [  50/156 ( 33%)]  Loss: 4.83 (4.74)  Time: 0.407s, 2514.52/s  (0.439s, 2333.36/s)  LR: 1.036e-01  Data: 0.027 (0.059)
Train: 99 [ 100/156 ( 65%)]  Loss: 4.84 (4.80)  Time: 0.411s, 2489.01/s  (0.425s, 2407.00/s)  LR: 1.036e-01  Data: 0.028 (0.043)
Train: 99 [ 150/156 ( 97%)]  Loss: 4.96 (4.85)  Time: 0.408s, 2510.04/s  (0.421s, 2433.22/s)  LR: 1.036e-01  Data: 0.025 (0.038)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.461 (1.461)  Loss:   7.380 ( 7.380)  Acc@1:   3.418 (  3.418)  Acc@5:   8.496 (  8.496)
Test: [  48/48]  Time: 0.090 (0.330)  Loss:   7.423 ( 7.509)  Acc@1:   3.656 (  3.558)  Acc@5:   9.316 (  8.440)
Train: 100 [   0/156 (  1%)]  Loss: 4.70 (4.70)  Time: 1.633s,  627.23/s  (1.633s,  627.23/s)  LR: 1.000e-01  Data: 1.261 (1.261)
Train: 100 [  50/156 ( 33%)]  Loss: 4.78 (4.72)  Time: 0.404s, 2533.23/s  (0.429s, 2388.19/s)  LR: 1.000e-01  Data: 0.027 (0.052)
Train: 100 [ 100/156 ( 65%)]  Loss: 4.85 (4.77)  Time: 0.408s, 2512.66/s  (0.418s, 2452.37/s)  LR: 1.000e-01  Data: 0.027 (0.039)
Train: 100 [ 150/156 ( 97%)]  Loss: 4.88 (4.81)  Time: 0.408s, 2508.21/s  (0.415s, 2467.62/s)  LR: 1.000e-01  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.436 (1.436)  Loss:   7.263 ( 7.263)  Acc@1:   3.613 (  3.613)  Acc@5:   9.961 (  9.961)
Test: [  48/48]  Time: 0.092 (0.331)  Loss:   7.401 ( 7.502)  Acc@1:   3.538 (  3.294)  Acc@5:   9.198 (  8.270)
Train: 101 [   0/156 (  1%)]  Loss: 4.62 (4.62)  Time: 1.580s,  648.06/s  (1.580s,  648.06/s)  LR: 9.639e-02  Data: 1.060 (1.060)
Train: 101 [  50/156 ( 33%)]  Loss: 4.69 (4.68)  Time: 0.409s, 2500.68/s  (0.433s, 2365.58/s)  LR: 9.639e-02  Data: 0.029 (0.047)
Train: 101 [ 100/156 ( 65%)]  Loss: 4.72 (4.72)  Time: 0.416s, 2463.89/s  (0.421s, 2433.44/s)  LR: 9.639e-02  Data: 0.026 (0.037)
Train: 101 [ 150/156 ( 97%)]  Loss: 4.89 (4.76)  Time: 0.412s, 2486.73/s  (0.418s, 2451.81/s)  LR: 9.639e-02  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.431 (1.431)  Loss:   7.284 ( 7.284)  Acc@1:   3.613 (  3.613)  Acc@5:   8.789 (  8.789)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   7.352 ( 7.522)  Acc@1:   4.127 (  3.372)  Acc@5:   9.552 (  8.220)
Train: 102 [   0/156 (  1%)]  Loss: 4.45 (4.45)  Time: 2.045s,  500.77/s  (2.045s,  500.77/s)  LR: 9.283e-02  Data: 1.670 (1.670)
Train: 102 [  50/156 ( 33%)]  Loss: 4.74 (4.62)  Time: 0.409s, 2502.16/s  (0.440s, 2327.84/s)  LR: 9.283e-02  Data: 0.028 (0.059)
Train: 102 [ 100/156 ( 65%)]  Loss: 4.79 (4.68)  Time: 0.412s, 2486.89/s  (0.425s, 2407.44/s)  LR: 9.283e-02  Data: 0.026 (0.043)
Train: 102 [ 150/156 ( 97%)]  Loss: 4.83 (4.73)  Time: 0.410s, 2494.97/s  (0.421s, 2433.76/s)  LR: 9.283e-02  Data: 0.027 (0.038)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.435 (1.435)  Loss:   7.353 ( 7.353)  Acc@1:   3.613 (  3.613)  Acc@5:   9.082 (  9.082)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   7.468 ( 7.586)  Acc@1:   3.774 (  3.384)  Acc@5:   8.844 (  8.354)
Train: 103 [   0/156 (  1%)]  Loss: 4.51 (4.51)  Time: 1.665s,  614.92/s  (1.665s,  614.92/s)  LR: 8.932e-02  Data: 1.266 (1.266)
Train: 103 [  50/156 ( 33%)]  Loss: 4.51 (4.59)  Time: 0.411s, 2491.69/s  (0.437s, 2342.88/s)  LR: 8.932e-02  Data: 0.027 (0.051)
Train: 103 [ 100/156 ( 65%)]  Loss: 4.73 (4.64)  Time: 0.414s, 2474.64/s  (0.425s, 2410.53/s)  LR: 8.932e-02  Data: 0.027 (0.039)
Train: 103 [ 150/156 ( 97%)]  Loss: 4.85 (4.68)  Time: 0.411s, 2494.31/s  (0.420s, 2436.28/s)  LR: 8.932e-02  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.457 (1.457)  Loss:   7.381 ( 7.381)  Acc@1:   4.102 (  4.102)  Acc@5:   9.668 (  9.668)
Test: [  48/48]  Time: 0.092 (0.328)  Loss:   7.525 ( 7.631)  Acc@1:   3.420 (  3.354)  Acc@5:   8.019 (  8.334)
Train: 104 [   0/156 (  1%)]  Loss: 4.55 (4.55)  Time: 1.621s,  631.73/s  (1.621s,  631.73/s)  LR: 8.586e-02  Data: 1.128 (1.128)
Train: 104 [  50/156 ( 33%)]  Loss: 4.72 (4.55)  Time: 0.410s, 2497.79/s  (0.437s, 2342.59/s)  LR: 8.586e-02  Data: 0.027 (0.049)
Train: 104 [ 100/156 ( 65%)]  Loss: 4.73 (4.60)  Time: 0.413s, 2480.04/s  (0.424s, 2415.76/s)  LR: 8.586e-02  Data: 0.031 (0.038)
Train: 104 [ 150/156 ( 97%)]  Loss: 4.72 (4.64)  Time: 0.409s, 2505.97/s  (0.420s, 2438.77/s)  LR: 8.586e-02  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.467 (1.467)  Loss:   7.467 ( 7.467)  Acc@1:   4.004 (  4.004)  Acc@5:   9.668 (  9.668)
Test: [  48/48]  Time: 0.091 (0.327)  Loss:   7.468 ( 7.612)  Acc@1:   3.302 (  3.122)  Acc@5:   8.962 (  8.058)
Train: 105 [   0/156 (  1%)]  Loss: 4.64 (4.64)  Time: 1.635s,  626.47/s  (1.635s,  626.47/s)  LR: 8.244e-02  Data: 1.260 (1.260)
Train: 105 [  50/156 ( 33%)]  Loss: 4.54 (4.50)  Time: 0.410s, 2498.47/s  (0.433s, 2365.87/s)  LR: 8.244e-02  Data: 0.028 (0.051)
Train: 105 [ 100/156 ( 65%)]  Loss: 4.67 (4.56)  Time: 0.412s, 2484.37/s  (0.423s, 2421.89/s)  LR: 8.244e-02  Data: 0.026 (0.039)
Train: 105 [ 150/156 ( 97%)]  Loss: 4.75 (4.59)  Time: 0.410s, 2499.52/s  (0.419s, 2441.04/s)  LR: 8.244e-02  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.473 (1.473)  Loss:   7.571 ( 7.571)  Acc@1:   3.418 (  3.418)  Acc@5:   9.473 (  9.473)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   7.598 ( 7.758)  Acc@1:   3.538 (  3.210)  Acc@5:   8.137 (  7.982)
Train: 106 [   0/156 (  1%)]  Loss: 4.30 (4.30)  Time: 1.774s,  577.35/s  (1.774s,  577.35/s)  LR: 7.908e-02  Data: 1.397 (1.397)
Train: 106 [  50/156 ( 33%)]  Loss: 4.62 (4.45)  Time: 0.413s, 2479.32/s  (0.440s, 2327.34/s)  LR: 7.908e-02  Data: 0.027 (0.054)
Train: 106 [ 100/156 ( 65%)]  Loss: 4.61 (4.50)  Time: 0.409s, 2500.70/s  (0.426s, 2404.05/s)  LR: 7.908e-02  Data: 0.028 (0.041)
Train: 106 [ 150/156 ( 97%)]  Loss: 4.68 (4.55)  Time: 0.412s, 2488.00/s  (0.421s, 2432.95/s)  LR: 7.908e-02  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.465 (1.465)  Loss:   7.579 ( 7.579)  Acc@1:   3.809 (  3.809)  Acc@5:   8.398 (  8.398)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   7.585 ( 7.729)  Acc@1:   3.420 (  3.324)  Acc@5:   8.491 (  8.072)
Train: 107 [   0/156 (  1%)]  Loss: 4.32 (4.32)  Time: 1.833s,  558.64/s  (1.833s,  558.64/s)  LR: 7.577e-02  Data: 1.458 (1.458)
Train: 107 [  50/156 ( 33%)]  Loss: 4.47 (4.43)  Time: 0.406s, 2519.79/s  (0.435s, 2356.54/s)  LR: 7.577e-02  Data: 0.027 (0.056)
Train: 107 [ 100/156 ( 65%)]  Loss: 4.51 (4.47)  Time: 0.408s, 2512.74/s  (0.420s, 2436.93/s)  LR: 7.577e-02  Data: 0.028 (0.041)
Train: 107 [ 150/156 ( 97%)]  Loss: 4.62 (4.51)  Time: 0.410s, 2498.03/s  (0.416s, 2460.97/s)  LR: 7.577e-02  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.444 (1.444)  Loss:   7.623 ( 7.623)  Acc@1:   3.223 (  3.223)  Acc@5:   8.105 (  8.105)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   7.647 ( 7.820)  Acc@1:   2.948 (  3.124)  Acc@5:   8.491 (  7.950)
Train: 108 [   0/156 (  1%)]  Loss: 4.40 (4.40)  Time: 1.727s,  593.05/s  (1.727s,  593.05/s)  LR: 7.252e-02  Data: 1.350 (1.350)
Train: 108 [  50/156 ( 33%)]  Loss: 4.38 (4.38)  Time: 0.410s, 2496.61/s  (0.439s, 2335.22/s)  LR: 7.252e-02  Data: 0.027 (0.054)
Train: 108 [ 100/156 ( 65%)]  Loss: 4.48 (4.43)  Time: 0.404s, 2534.25/s  (0.423s, 2422.49/s)  LR: 7.252e-02  Data: 0.027 (0.041)
Train: 108 [ 150/156 ( 97%)]  Loss: 4.61 (4.47)  Time: 0.400s, 2560.24/s  (0.416s, 2460.08/s)  LR: 7.252e-02  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.459 (1.459)  Loss:   7.587 ( 7.587)  Acc@1:   3.320 (  3.320)  Acc@5:   8.008 (  8.008)
Test: [  48/48]  Time: 0.089 (0.330)  Loss:   7.584 ( 7.744)  Acc@1:   3.184 (  3.158)  Acc@5:   9.670 (  7.872)
Train: 109 [   0/156 (  1%)]  Loss: 4.25 (4.25)  Time: 2.024s,  505.98/s  (2.024s,  505.98/s)  LR: 6.932e-02  Data: 1.654 (1.654)
Train: 109 [  50/156 ( 33%)]  Loss: 4.41 (4.34)  Time: 0.407s, 2513.42/s  (0.435s, 2354.37/s)  LR: 6.932e-02  Data: 0.027 (0.059)
Train: 109 [ 100/156 ( 65%)]  Loss: 4.45 (4.38)  Time: 0.407s, 2518.21/s  (0.420s, 2438.78/s)  LR: 6.932e-02  Data: 0.029 (0.043)
Train: 109 [ 150/156 ( 97%)]  Loss: 4.46 (4.42)  Time: 0.407s, 2517.40/s  (0.415s, 2464.96/s)  LR: 6.932e-02  Data: 0.026 (0.038)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.423 (1.423)  Loss:   7.793 ( 7.793)  Acc@1:   3.418 (  3.418)  Acc@5:   7.617 (  7.617)
Test: [  48/48]  Time: 0.091 (0.327)  Loss:   7.771 ( 7.943)  Acc@1:   2.948 (  3.042)  Acc@5:   8.726 (  7.850)
Train: 110 [   0/156 (  1%)]  Loss: 4.20 (4.20)  Time: 1.846s,  554.78/s  (1.846s,  554.78/s)  LR: 6.617e-02  Data: 1.469 (1.469)
Train: 110 [  50/156 ( 33%)]  Loss: 4.38 (4.30)  Time: 0.414s, 2475.25/s  (0.439s, 2329.97/s)  LR: 6.617e-02  Data: 0.027 (0.055)
Train: 110 [ 100/156 ( 65%)]  Loss: 4.31 (4.34)  Time: 0.412s, 2482.56/s  (0.426s, 2405.42/s)  LR: 6.617e-02  Data: 0.027 (0.041)
Train: 110 [ 150/156 ( 97%)]  Loss: 4.51 (4.38)  Time: 0.409s, 2501.29/s  (0.421s, 2430.42/s)  LR: 6.617e-02  Data: 0.025 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.460 (1.460)  Loss:   7.781 ( 7.781)  Acc@1:   3.027 (  3.027)  Acc@5:   8.691 (  8.691)
Test: [  48/48]  Time: 0.090 (0.333)  Loss:   7.764 ( 7.942)  Acc@1:   3.066 (  3.034)  Acc@5:   7.901 (  7.638)
Train: 111 [   0/156 (  1%)]  Loss: 4.23 (4.23)  Time: 1.715s,  597.13/s  (1.715s,  597.13/s)  LR: 6.309e-02  Data: 1.093 (1.093)
Train: 111 [  50/156 ( 33%)]  Loss: 4.23 (4.26)  Time: 0.408s, 2507.95/s  (0.433s, 2366.39/s)  LR: 6.309e-02  Data: 0.027 (0.048)
Train: 111 [ 100/156 ( 65%)]  Loss: 4.50 (4.31)  Time: 0.410s, 2499.91/s  (0.422s, 2428.78/s)  LR: 6.309e-02  Data: 0.027 (0.038)
Train: 111 [ 150/156 ( 97%)]  Loss: 4.28 (4.35)  Time: 0.410s, 2498.32/s  (0.418s, 2447.84/s)  LR: 6.309e-02  Data: 0.025 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.448 (1.448)  Loss:   7.700 ( 7.700)  Acc@1:   3.613 (  3.613)  Acc@5:   9.082 (  9.082)
Test: [  48/48]  Time: 0.091 (0.328)  Loss:   7.708 ( 7.903)  Acc@1:   3.302 (  3.120)  Acc@5:   8.844 (  7.888)
Train: 112 [   0/156 (  1%)]  Loss: 4.17 (4.17)  Time: 1.880s,  544.82/s  (1.880s,  544.82/s)  LR: 6.007e-02  Data: 1.173 (1.173)
Train: 112 [  50/156 ( 33%)]  Loss: 4.29 (4.22)  Time: 0.408s, 2507.50/s  (0.437s, 2341.21/s)  LR: 6.007e-02  Data: 0.025 (0.050)
Train: 112 [ 100/156 ( 65%)]  Loss: 4.34 (4.27)  Time: 0.412s, 2483.47/s  (0.423s, 2418.44/s)  LR: 6.007e-02  Data: 0.027 (0.039)
Train: 112 [ 150/156 ( 97%)]  Loss: 4.38 (4.30)  Time: 0.411s, 2490.38/s  (0.420s, 2437.27/s)  LR: 6.007e-02  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.453 (1.453)  Loss:   7.805 ( 7.805)  Acc@1:   3.809 (  3.809)  Acc@5:   8.594 (  8.594)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   7.816 ( 8.014)  Acc@1:   3.420 (  3.088)  Acc@5:   8.726 (  7.732)
Train: 113 [   0/156 (  1%)]  Loss: 4.05 (4.05)  Time: 1.678s,  610.41/s  (1.678s,  610.41/s)  LR: 5.711e-02  Data: 1.301 (1.301)
Train: 113 [  50/156 ( 33%)]  Loss: 4.28 (4.19)  Time: 0.408s, 2511.33/s  (0.436s, 2350.14/s)  LR: 5.711e-02  Data: 0.023 (0.052)
Train: 113 [ 100/156 ( 65%)]  Loss: 4.20 (4.23)  Time: 0.413s, 2481.61/s  (0.424s, 2413.88/s)  LR: 5.711e-02  Data: 0.027 (0.040)
Train: 113 [ 150/156 ( 97%)]  Loss: 4.39 (4.26)  Time: 0.409s, 2501.32/s  (0.420s, 2435.34/s)  LR: 5.711e-02  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.441 (1.441)  Loss:   7.670 ( 7.670)  Acc@1:   2.832 (  2.832)  Acc@5:   8.789 (  8.789)
Test: [  48/48]  Time: 0.091 (0.331)  Loss:   7.758 ( 7.881)  Acc@1:   3.656 (  2.984)  Acc@5:   8.373 (  7.598)
Train: 114 [   0/156 (  1%)]  Loss: 4.14 (4.14)  Time: 1.871s,  547.28/s  (1.871s,  547.28/s)  LR: 5.421e-02  Data: 1.171 (1.171)
Train: 114 [  50/156 ( 33%)]  Loss: 4.29 (4.13)  Time: 0.413s, 2480.21/s  (0.439s, 2333.35/s)  LR: 5.421e-02  Data: 0.027 (0.050)
Train: 114 [ 100/156 ( 65%)]  Loss: 4.23 (4.18)  Time: 0.406s, 2521.80/s  (0.425s, 2407.59/s)  LR: 5.421e-02  Data: 0.027 (0.038)
Train: 114 [ 150/156 ( 97%)]  Loss: 4.36 (4.22)  Time: 0.401s, 2552.63/s  (0.419s, 2444.73/s)  LR: 5.421e-02  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.435 (1.435)  Loss:   7.891 ( 7.891)  Acc@1:   3.613 (  3.613)  Acc@5:   8.691 (  8.691)
Test: [  48/48]  Time: 0.090 (0.328)  Loss:   7.908 ( 8.092)  Acc@1:   3.184 (  3.100)  Acc@5:   8.491 (  7.548)
Train: 115 [   0/156 (  1%)]  Loss: 3.95 (3.95)  Time: 1.626s,  629.89/s  (1.626s,  629.89/s)  LR: 5.137e-02  Data: 1.257 (1.257)
Train: 115 [  50/156 ( 33%)]  Loss: 4.04 (4.11)  Time: 0.410s, 2496.87/s  (0.427s, 2397.22/s)  LR: 5.137e-02  Data: 0.030 (0.051)
Train: 115 [ 100/156 ( 65%)]  Loss: 4.22 (4.15)  Time: 0.406s, 2524.48/s  (0.416s, 2458.81/s)  LR: 5.137e-02  Data: 0.027 (0.039)
Train: 115 [ 150/156 ( 97%)]  Loss: 4.25 (4.18)  Time: 0.405s, 2525.86/s  (0.414s, 2473.83/s)  LR: 5.137e-02  Data: 0.024 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.502 (1.502)  Loss:   7.987 ( 7.987)  Acc@1:   2.832 (  2.832)  Acc@5:   7.422 (  7.422)
Test: [  48/48]  Time: 0.092 (0.328)  Loss:   7.992 ( 8.143)  Acc@1:   2.594 (  2.946)  Acc@5:   7.783 (  7.406)
Train: 116 [   0/156 (  1%)]  Loss: 3.99 (3.99)  Time: 1.606s,  637.71/s  (1.606s,  637.71/s)  LR: 4.860e-02  Data: 1.146 (1.146)
Train: 116 [  50/156 ( 33%)]  Loss: 4.11 (4.08)  Time: 0.411s, 2489.67/s  (0.435s, 2355.00/s)  LR: 4.860e-02  Data: 0.027 (0.049)
Train: 116 [ 100/156 ( 65%)]  Loss: 4.23 (4.11)  Time: 0.412s, 2486.93/s  (0.423s, 2418.54/s)  LR: 4.860e-02  Data: 0.029 (0.038)
Train: 116 [ 150/156 ( 97%)]  Loss: 4.29 (4.15)  Time: 0.411s, 2490.08/s  (0.420s, 2440.30/s)  LR: 4.860e-02  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.429 (1.429)  Loss:   8.148 ( 8.148)  Acc@1:   2.637 (  2.637)  Acc@5:   8.008 (  8.008)
Test: [  48/48]  Time: 0.091 (0.331)  Loss:   8.154 ( 8.286)  Acc@1:   3.066 (  2.954)  Acc@5:   7.193 (  7.342)
Train: 117 [   0/156 (  1%)]  Loss: 4.01 (4.01)  Time: 1.631s,  627.73/s  (1.631s,  627.73/s)  LR: 4.590e-02  Data: 1.255 (1.255)
Train: 117 [  50/156 ( 33%)]  Loss: 3.99 (4.03)  Time: 0.409s, 2502.16/s  (0.434s, 2358.48/s)  LR: 4.590e-02  Data: 0.027 (0.051)
Train: 117 [ 100/156 ( 65%)]  Loss: 4.08 (4.08)  Time: 0.408s, 2512.41/s  (0.423s, 2419.32/s)  LR: 4.590e-02  Data: 0.028 (0.039)
Train: 117 [ 150/156 ( 97%)]  Loss: 4.28 (4.11)  Time: 0.407s, 2518.96/s  (0.418s, 2448.44/s)  LR: 4.590e-02  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.432 (1.432)  Loss:   7.796 ( 7.796)  Acc@1:   3.418 (  3.418)  Acc@5:   9.082 (  9.082)
Test: [  48/48]  Time: 0.090 (0.333)  Loss:   7.749 ( 7.940)  Acc@1:   2.948 (  2.990)  Acc@5:   8.255 (  7.636)
Train: 118 [   0/156 (  1%)]  Loss: 3.87 (3.87)  Time: 1.750s,  585.11/s  (1.750s,  585.11/s)  LR: 4.326e-02  Data: 1.220 (1.220)
Train: 118 [  50/156 ( 33%)]  Loss: 4.02 (4.01)  Time: 0.407s, 2516.55/s  (0.434s, 2361.60/s)  LR: 4.326e-02  Data: 0.027 (0.051)
Train: 118 [ 100/156 ( 65%)]  Loss: 4.14 (4.05)  Time: 0.411s, 2494.06/s  (0.422s, 2426.60/s)  LR: 4.326e-02  Data: 0.027 (0.039)
Train: 118 [ 150/156 ( 97%)]  Loss: 4.19 (4.07)  Time: 0.412s, 2487.38/s  (0.419s, 2445.66/s)  LR: 4.326e-02  Data: 0.027 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.440 (1.440)  Loss:   7.851 ( 7.851)  Acc@1:   3.223 (  3.223)  Acc@5:   8.398 (  8.398)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   7.864 ( 8.039)  Acc@1:   4.127 (  3.008)  Acc@5:   8.373 (  7.544)
Train: 119 [   0/156 (  1%)]  Loss: 3.97 (3.97)  Time: 1.823s,  561.64/s  (1.823s,  561.64/s)  LR: 4.069e-02  Data: 1.430 (1.430)
Train: 119 [  50/156 ( 33%)]  Loss: 4.00 (3.98)  Time: 0.411s, 2489.67/s  (0.440s, 2326.26/s)  LR: 4.069e-02  Data: 0.027 (0.055)
Train: 119 [ 100/156 ( 65%)]  Loss: 4.05 (4.01)  Time: 0.414s, 2472.84/s  (0.427s, 2399.37/s)  LR: 4.069e-02  Data: 0.032 (0.041)
Train: 119 [ 150/156 ( 97%)]  Loss: 4.05 (4.03)  Time: 0.411s, 2488.90/s  (0.422s, 2427.54/s)  LR: 4.069e-02  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.425 (1.425)  Loss:   7.862 ( 7.862)  Acc@1:   3.223 (  3.223)  Acc@5:   8.594 (  8.594)
Test: [  48/48]  Time: 0.090 (0.329)  Loss:   7.946 ( 8.041)  Acc@1:   3.420 (  3.134)  Acc@5:   8.137 (  7.600)
Train: 120 [   0/156 (  1%)]  Loss: 3.78 (3.78)  Time: 1.612s,  635.13/s  (1.612s,  635.13/s)  LR: 3.820e-02  Data: 1.239 (1.239)
Train: 120 [  50/156 ( 33%)]  Loss: 3.94 (3.95)  Time: 0.405s, 2530.78/s  (0.429s, 2384.41/s)  LR: 3.820e-02  Data: 0.027 (0.050)
Train: 120 [ 100/156 ( 65%)]  Loss: 4.15 (3.98)  Time: 0.405s, 2525.73/s  (0.418s, 2452.37/s)  LR: 3.820e-02  Data: 0.026 (0.039)
Train: 120 [ 150/156 ( 97%)]  Loss: 4.11 (4.00)  Time: 0.408s, 2506.99/s  (0.415s, 2469.95/s)  LR: 3.820e-02  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.422 (1.422)  Loss:   7.915 ( 7.915)  Acc@1:   3.027 (  3.027)  Acc@5:   8.594 (  8.594)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   7.899 ( 8.078)  Acc@1:   3.892 (  2.996)  Acc@5:   8.137 (  7.456)
Train: 121 [   0/156 (  1%)]  Loss: 3.86 (3.86)  Time: 1.660s,  616.91/s  (1.660s,  616.91/s)  LR: 3.577e-02  Data: 1.282 (1.282)
Train: 121 [  50/156 ( 33%)]  Loss: 3.91 (3.90)  Time: 0.411s, 2490.33/s  (0.437s, 2343.79/s)  LR: 3.577e-02  Data: 0.027 (0.052)
Train: 121 [ 100/156 ( 65%)]  Loss: 3.93 (3.92)  Time: 0.412s, 2482.54/s  (0.425s, 2411.49/s)  LR: 3.577e-02  Data: 0.026 (0.040)
Train: 121 [ 150/156 ( 97%)]  Loss: 4.08 (3.95)  Time: 0.410s, 2496.96/s  (0.421s, 2434.08/s)  LR: 3.577e-02  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.427 (1.427)  Loss:   7.938 ( 7.938)  Acc@1:   3.613 (  3.613)  Acc@5:   8.496 (  8.496)
Test: [  48/48]  Time: 0.090 (0.331)  Loss:   7.899 ( 8.143)  Acc@1:   3.774 (  3.076)  Acc@5:   8.019 (  7.606)
Train: 122 [   0/156 (  1%)]  Loss: 3.90 (3.90)  Time: 1.640s,  624.28/s  (1.640s,  624.28/s)  LR: 3.342e-02  Data: 1.267 (1.267)
Train: 122 [  50/156 ( 33%)]  Loss: 3.84 (3.86)  Time: 0.404s, 2536.28/s  (0.430s, 2379.50/s)  LR: 3.342e-02  Data: 0.026 (0.051)
Train: 122 [ 100/156 ( 65%)]  Loss: 3.94 (3.89)  Time: 0.406s, 2523.90/s  (0.418s, 2448.92/s)  LR: 3.342e-02  Data: 0.027 (0.039)
Train: 122 [ 150/156 ( 97%)]  Loss: 4.03 (3.92)  Time: 0.409s, 2505.04/s  (0.415s, 2466.57/s)  LR: 3.342e-02  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.431 (1.431)  Loss:   7.964 ( 7.964)  Acc@1:   2.832 (  2.832)  Acc@5:   8.301 (  8.301)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   8.008 ( 8.131)  Acc@1:   3.066 (  3.004)  Acc@5:   7.429 (  7.420)
Train: 123 [   0/156 (  1%)]  Loss: 3.73 (3.73)  Time: 2.035s,  503.10/s  (2.035s,  503.10/s)  LR: 3.113e-02  Data: 1.659 (1.659)
Train: 123 [  50/156 ( 33%)]  Loss: 3.91 (3.84)  Time: 0.408s, 2508.99/s  (0.442s, 2316.37/s)  LR: 3.113e-02  Data: 0.027 (0.059)
Train: 123 [ 100/156 ( 65%)]  Loss: 3.94 (3.87)  Time: 0.408s, 2510.67/s  (0.425s, 2407.74/s)  LR: 3.113e-02  Data: 0.027 (0.044)
Train: 123 [ 150/156 ( 97%)]  Loss: 3.92 (3.89)  Time: 0.410s, 2494.75/s  (0.421s, 2434.23/s)  LR: 3.113e-02  Data: 0.026 (0.038)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.471 (1.471)  Loss:   7.870 ( 7.870)  Acc@1:   3.809 (  3.809)  Acc@5:   7.812 (  7.812)
Test: [  48/48]  Time: 0.092 (0.329)  Loss:   7.893 ( 8.035)  Acc@1:   2.476 (  3.006)  Acc@5:   8.137 (  7.594)
Train: 124 [   0/156 (  1%)]  Loss: 3.76 (3.76)  Time: 1.802s,  568.22/s  (1.802s,  568.22/s)  LR: 2.893e-02  Data: 1.425 (1.425)
Train: 124 [  50/156 ( 33%)]  Loss: 3.73 (3.81)  Time: 0.417s, 2458.27/s  (0.440s, 2329.25/s)  LR: 2.893e-02  Data: 0.026 (0.055)
Train: 124 [ 100/156 ( 65%)]  Loss: 3.86 (3.83)  Time: 0.410s, 2498.21/s  (0.426s, 2406.16/s)  LR: 2.893e-02  Data: 0.029 (0.041)
Train: 124 [ 150/156 ( 97%)]  Loss: 3.85 (3.85)  Time: 0.402s, 2544.56/s  (0.419s, 2441.36/s)  LR: 2.893e-02  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.457 (1.457)  Loss:   8.062 ( 8.062)  Acc@1:   2.832 (  2.832)  Acc@5:   8.008 (  8.008)
Test: [  48/48]  Time: 0.091 (0.332)  Loss:   8.053 ( 8.216)  Acc@1:   2.948 (  2.946)  Acc@5:   6.722 (  7.500)
Train: 125 [   0/156 (  1%)]  Loss: 3.81 (3.81)  Time: 1.877s,  545.65/s  (1.877s,  545.65/s)  LR: 2.679e-02  Data: 1.504 (1.504)
Train: 125 [  50/156 ( 33%)]  Loss: 3.81 (3.80)  Time: 0.406s, 2521.98/s  (0.435s, 2354.15/s)  LR: 2.679e-02  Data: 0.027 (0.056)
Train: 125 [ 100/156 ( 65%)]  Loss: 3.81 (3.81)  Time: 0.408s, 2510.40/s  (0.422s, 2427.91/s)  LR: 2.679e-02  Data: 0.027 (0.042)
Train: 125 [ 150/156 ( 97%)]  Loss: 3.90 (3.83)  Time: 0.407s, 2514.93/s  (0.418s, 2447.62/s)  LR: 2.679e-02  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.426 (1.426)  Loss:   7.988 ( 7.988)  Acc@1:   2.539 (  2.539)  Acc@5:   8.105 (  8.105)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   7.950 ( 8.160)  Acc@1:   3.656 (  2.916)  Acc@5:   7.783 (  7.516)
Train: 126 [   0/156 (  1%)]  Loss: 3.62 (3.62)  Time: 1.648s,  621.38/s  (1.648s,  621.38/s)  LR: 2.474e-02  Data: 1.074 (1.074)
Train: 126 [  50/156 ( 33%)]  Loss: 3.78 (3.74)  Time: 0.413s, 2479.01/s  (0.436s, 2349.94/s)  LR: 2.474e-02  Data: 0.026 (0.048)
Train: 126 [ 100/156 ( 65%)]  Loss: 3.83 (3.76)  Time: 0.415s, 2468.14/s  (0.425s, 2409.90/s)  LR: 2.474e-02  Data: 0.027 (0.038)
Train: 126 [ 150/156 ( 97%)]  Loss: 3.79 (3.79)  Time: 0.404s, 2532.02/s  (0.420s, 2435.61/s)  LR: 2.474e-02  Data: 0.025 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.434 (1.434)  Loss:   7.890 ( 7.890)  Acc@1:   3.125 (  3.125)  Acc@5:   8.203 (  8.203)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   7.947 ( 8.058)  Acc@1:   3.066 (  3.050)  Acc@5:   6.486 (  7.606)
Train: 127 [   0/156 (  1%)]  Loss: 3.65 (3.65)  Time: 1.754s,  583.76/s  (1.754s,  583.76/s)  LR: 2.276e-02  Data: 1.358 (1.358)
Train: 127 [  50/156 ( 33%)]  Loss: 3.68 (3.73)  Time: 0.414s, 2475.03/s  (0.440s, 2329.54/s)  LR: 2.276e-02  Data: 0.027 (0.056)
Train: 127 [ 100/156 ( 65%)]  Loss: 3.81 (3.75)  Time: 0.410s, 2499.85/s  (0.426s, 2406.09/s)  LR: 2.276e-02  Data: 0.032 (0.042)
Train: 127 [ 150/156 ( 97%)]  Loss: 3.81 (3.76)  Time: 0.402s, 2544.67/s  (0.419s, 2443.88/s)  LR: 2.276e-02  Data: 0.026 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.436 (1.436)  Loss:   7.886 ( 7.886)  Acc@1:   2.930 (  2.930)  Acc@5:   8.398 (  8.398)
Test: [  48/48]  Time: 0.090 (0.328)  Loss:   7.912 ( 8.090)  Acc@1:   2.948 (  3.002)  Acc@5:   7.547 (  7.592)
Train: 128 [   0/156 (  1%)]  Loss: 3.77 (3.77)  Time: 1.844s,  555.38/s  (1.844s,  555.38/s)  LR: 2.086e-02  Data: 1.474 (1.474)
Train: 128 [  50/156 ( 33%)]  Loss: 3.75 (3.70)  Time: 0.405s, 2530.86/s  (0.433s, 2362.97/s)  LR: 2.086e-02  Data: 0.024 (0.056)
Train: 128 [ 100/156 ( 65%)]  Loss: 3.71 (3.71)  Time: 0.409s, 2502.22/s  (0.421s, 2429.48/s)  LR: 2.086e-02  Data: 0.026 (0.042)
Train: 128 [ 150/156 ( 97%)]  Loss: 3.83 (3.74)  Time: 0.410s, 2499.72/s  (0.419s, 2445.90/s)  LR: 2.086e-02  Data: 0.025 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.427 (1.427)  Loss:   7.959 ( 7.959)  Acc@1:   3.320 (  3.320)  Acc@5:   8.008 (  8.008)
Test: [  48/48]  Time: 0.090 (0.331)  Loss:   7.938 ( 8.078)  Acc@1:   3.066 (  2.950)  Acc@5:   7.783 (  7.370)
Train: 129 [   0/156 (  1%)]  Loss: 3.59 (3.59)  Time: 1.637s,  625.53/s  (1.637s,  625.53/s)  LR: 1.903e-02  Data: 1.192 (1.192)
Train: 129 [  50/156 ( 33%)]  Loss: 3.72 (3.66)  Time: 0.407s, 2518.09/s  (0.431s, 2373.27/s)  LR: 1.903e-02  Data: 0.029 (0.050)
Train: 129 [ 100/156 ( 65%)]  Loss: 3.80 (3.69)  Time: 0.406s, 2520.91/s  (0.419s, 2441.92/s)  LR: 1.903e-02  Data: 0.026 (0.039)
Train: 129 [ 150/156 ( 97%)]  Loss: 3.80 (3.71)  Time: 0.410s, 2494.76/s  (0.416s, 2459.69/s)  LR: 1.903e-02  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.443 (1.443)  Loss:   8.093 ( 8.093)  Acc@1:   2.441 (  2.441)  Acc@5:   7.715 (  7.715)
Test: [  48/48]  Time: 0.091 (0.328)  Loss:   8.043 ( 8.268)  Acc@1:   3.420 (  2.966)  Acc@5:   7.429 (  7.276)
Train: 130 [   0/156 (  1%)]  Loss: 3.65 (3.65)  Time: 2.018s,  507.56/s  (2.018s,  507.56/s)  LR: 1.729e-02  Data: 1.644 (1.644)
Train: 130 [  50/156 ( 33%)]  Loss: 3.74 (3.66)  Time: 0.406s, 2522.54/s  (0.439s, 2330.63/s)  LR: 1.729e-02  Data: 0.026 (0.059)
Train: 130 [ 100/156 ( 65%)]  Loss: 3.64 (3.67)  Time: 0.413s, 2480.44/s  (0.425s, 2411.65/s)  LR: 1.729e-02  Data: 0.027 (0.043)
Train: 130 [ 150/156 ( 97%)]  Loss: 3.72 (3.68)  Time: 0.409s, 2502.27/s  (0.421s, 2434.20/s)  LR: 1.729e-02  Data: 0.025 (0.038)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.429 (1.429)  Loss:   8.093 ( 8.093)  Acc@1:   2.344 (  2.344)  Acc@5:   7.031 (  7.031)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   8.051 ( 8.257)  Acc@1:   2.476 (  2.876)  Acc@5:   7.075 (  7.284)
Train: 131 [   0/156 (  1%)]  Loss: 3.70 (3.70)  Time: 1.822s,  561.97/s  (1.822s,  561.97/s)  LR: 1.563e-02  Data: 1.445 (1.445)
Train: 131 [  50/156 ( 33%)]  Loss: 3.66 (3.64)  Time: 0.410s, 2496.93/s  (0.440s, 2326.93/s)  LR: 1.563e-02  Data: 0.026 (0.055)
Train: 131 [ 100/156 ( 65%)]  Loss: 3.67 (3.64)  Time: 0.419s, 2441.43/s  (0.426s, 2403.03/s)  LR: 1.563e-02  Data: 0.028 (0.041)
Train: 131 [ 150/156 ( 97%)]  Loss: 3.68 (3.66)  Time: 0.409s, 2504.64/s  (0.421s, 2431.78/s)  LR: 1.563e-02  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.441 (1.441)  Loss:   8.053 ( 8.053)  Acc@1:   3.320 (  3.320)  Acc@5:   7.324 (  7.324)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   8.002 ( 8.204)  Acc@1:   2.948 (  2.988)  Acc@5:   7.547 (  7.322)
Train: 132 [   0/156 (  1%)]  Loss: 3.52 (3.52)  Time: 1.674s,  611.59/s  (1.674s,  611.59/s)  LR: 1.404e-02  Data: 1.301 (1.301)
Train: 132 [  50/156 ( 33%)]  Loss: 3.63 (3.60)  Time: 0.406s, 2524.41/s  (0.430s, 2380.79/s)  LR: 1.404e-02  Data: 0.027 (0.052)
Train: 132 [ 100/156 ( 65%)]  Loss: 3.56 (3.62)  Time: 0.407s, 2513.99/s  (0.418s, 2447.71/s)  LR: 1.404e-02  Data: 0.027 (0.040)
Train: 132 [ 150/156 ( 97%)]  Loss: 3.70 (3.64)  Time: 0.407s, 2515.14/s  (0.415s, 2468.34/s)  LR: 1.404e-02  Data: 0.024 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.422 (1.422)  Loss:   8.077 ( 8.077)  Acc@1:   3.223 (  3.223)  Acc@5:   7.520 (  7.520)
Test: [  48/48]  Time: 0.092 (0.328)  Loss:   8.055 ( 8.223)  Acc@1:   2.830 (  2.940)  Acc@5:   7.193 (  7.362)
Train: 133 [   0/156 (  1%)]  Loss: 3.64 (3.64)  Time: 1.939s,  528.20/s  (1.939s,  528.20/s)  LR: 1.254e-02  Data: 1.136 (1.136)
Train: 133 [  50/156 ( 33%)]  Loss: 3.48 (3.58)  Time: 0.412s, 2485.10/s  (0.441s, 2322.12/s)  LR: 1.254e-02  Data: 0.028 (0.049)
Train: 133 [ 100/156 ( 65%)]  Loss: 3.71 (3.60)  Time: 0.415s, 2470.03/s  (0.427s, 2399.86/s)  LR: 1.254e-02  Data: 0.028 (0.038)
Train: 133 [ 150/156 ( 97%)]  Loss: 3.69 (3.62)  Time: 0.409s, 2504.55/s  (0.422s, 2429.05/s)  LR: 1.254e-02  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.448 (1.448)  Loss:   8.055 ( 8.055)  Acc@1:   2.832 (  2.832)  Acc@5:   8.105 (  8.105)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   8.003 ( 8.203)  Acc@1:   2.830 (  2.962)  Acc@5:   8.491 (  7.482)
Train: 134 [   0/156 (  1%)]  Loss: 3.54 (3.54)  Time: 1.801s,  568.52/s  (1.801s,  568.52/s)  LR: 1.112e-02  Data: 1.295 (1.295)
Train: 134 [  50/156 ( 33%)]  Loss: 3.59 (3.57)  Time: 0.403s, 2539.07/s  (0.437s, 2344.43/s)  LR: 1.112e-02  Data: 0.026 (0.052)
Train: 134 [ 100/156 ( 65%)]  Loss: 3.57 (3.58)  Time: 0.402s, 2549.31/s  (0.420s, 2437.48/s)  LR: 1.112e-02  Data: 0.027 (0.039)
Train: 134 [ 150/156 ( 97%)]  Loss: 3.57 (3.59)  Time: 0.398s, 2570.02/s  (0.414s, 2473.43/s)  LR: 1.112e-02  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.442 (1.442)  Loss:   8.136 ( 8.136)  Acc@1:   2.930 (  2.930)  Acc@5:   7.617 (  7.617)
Test: [  48/48]  Time: 0.090 (0.329)  Loss:   8.066 ( 8.282)  Acc@1:   3.184 (  2.954)  Acc@5:   7.901 (  7.338)
Train: 135 [   0/156 (  1%)]  Loss: 3.52 (3.52)  Time: 1.710s,  598.69/s  (1.710s,  598.69/s)  LR: 9.789e-03  Data: 1.343 (1.343)
Train: 135 [  50/156 ( 33%)]  Loss: 3.67 (3.55)  Time: 0.401s, 2551.69/s  (0.427s, 2398.41/s)  LR: 9.789e-03  Data: 0.028 (0.053)
Train: 135 [ 100/156 ( 65%)]  Loss: 3.62 (3.57)  Time: 0.404s, 2533.31/s  (0.416s, 2463.86/s)  LR: 9.789e-03  Data: 0.027 (0.040)
Train: 135 [ 150/156 ( 97%)]  Loss: 3.65 (3.58)  Time: 0.406s, 2522.66/s  (0.412s, 2483.43/s)  LR: 9.789e-03  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.468 (1.468)  Loss:   8.029 ( 8.029)  Acc@1:   3.418 (  3.418)  Acc@5:   8.301 (  8.301)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   8.027 ( 8.193)  Acc@1:   3.302 (  2.956)  Acc@5:   7.429 (  7.424)
Train: 136 [   0/156 (  1%)]  Loss: 3.50 (3.50)  Time: 1.642s,  623.68/s  (1.642s,  623.68/s)  LR: 8.536e-03  Data: 1.265 (1.265)
Train: 136 [  50/156 ( 33%)]  Loss: 3.48 (3.53)  Time: 0.416s, 2459.96/s  (0.436s, 2346.83/s)  LR: 8.536e-03  Data: 0.029 (0.053)
Train: 136 [ 100/156 ( 65%)]  Loss: 3.54 (3.55)  Time: 0.411s, 2488.93/s  (0.424s, 2413.09/s)  LR: 8.536e-03  Data: 0.027 (0.040)
Train: 136 [ 150/156 ( 97%)]  Loss: 3.64 (3.56)  Time: 0.413s, 2478.80/s  (0.420s, 2436.82/s)  LR: 8.536e-03  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.432 (1.432)  Loss:   8.087 ( 8.087)  Acc@1:   3.613 (  3.613)  Acc@5:   8.203 (  8.203)
Test: [  48/48]  Time: 0.091 (0.328)  Loss:   8.035 ( 8.246)  Acc@1:   3.184 (  2.950)  Acc@5:   7.429 (  7.244)
Train: 137 [   0/156 (  1%)]  Loss: 3.49 (3.49)  Time: 1.737s,  589.55/s  (1.737s,  589.55/s)  LR: 7.367e-03  Data: 1.361 (1.361)
Train: 137 [  50/156 ( 33%)]  Loss: 3.59 (3.53)  Time: 0.415s, 2468.39/s  (0.439s, 2332.49/s)  LR: 7.367e-03  Data: 0.027 (0.056)
Train: 137 [ 100/156 ( 65%)]  Loss: 3.47 (3.53)  Time: 0.411s, 2493.22/s  (0.426s, 2404.72/s)  LR: 7.367e-03  Data: 0.028 (0.042)
Train: 137 [ 150/156 ( 97%)]  Loss: 3.46 (3.54)  Time: 0.409s, 2503.23/s  (0.421s, 2430.21/s)  LR: 7.367e-03  Data: 0.025 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.541 (1.541)  Loss:   8.080 ( 8.080)  Acc@1:   2.930 (  2.930)  Acc@5:   8.496 (  8.496)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   8.038 ( 8.248)  Acc@1:   3.066 (  2.844)  Acc@5:   7.311 (  7.328)
Train: 138 [   0/156 (  1%)]  Loss: 3.52 (3.52)  Time: 1.718s,  596.00/s  (1.718s,  596.00/s)  LR: 6.283e-03  Data: 1.165 (1.165)
Train: 138 [  50/156 ( 33%)]  Loss: 3.59 (3.50)  Time: 0.408s, 2512.44/s  (0.436s, 2349.91/s)  LR: 6.283e-03  Data: 0.027 (0.051)
Train: 138 [ 100/156 ( 65%)]  Loss: 3.67 (3.52)  Time: 0.412s, 2488.21/s  (0.424s, 2413.61/s)  LR: 6.283e-03  Data: 0.027 (0.039)
Train: 138 [ 150/156 ( 97%)]  Loss: 3.57 (3.52)  Time: 0.408s, 2511.66/s  (0.420s, 2435.78/s)  LR: 6.283e-03  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.434 (1.434)  Loss:   8.072 ( 8.072)  Acc@1:   2.832 (  2.832)  Acc@5:   8.398 (  8.398)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   8.060 ( 8.257)  Acc@1:   3.656 (  2.904)  Acc@5:   8.019 (  7.316)
Train: 139 [   0/156 (  1%)]  Loss: 3.45 (3.45)  Time: 1.664s,  615.44/s  (1.664s,  615.44/s)  LR: 5.284e-03  Data: 1.287 (1.287)
Train: 139 [  50/156 ( 33%)]  Loss: 3.48 (3.51)  Time: 0.415s, 2467.51/s  (0.436s, 2348.74/s)  LR: 5.284e-03  Data: 0.028 (0.052)
Train: 139 [ 100/156 ( 65%)]  Loss: 3.51 (3.50)  Time: 0.410s, 2494.95/s  (0.424s, 2416.87/s)  LR: 5.284e-03  Data: 0.027 (0.040)
Train: 139 [ 150/156 ( 97%)]  Loss: 3.54 (3.51)  Time: 0.409s, 2505.61/s  (0.419s, 2442.25/s)  LR: 5.284e-03  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.416 (1.416)  Loss:   8.096 ( 8.096)  Acc@1:   3.125 (  3.125)  Acc@5:   8.105 (  8.105)
Test: [  48/48]  Time: 0.092 (0.327)  Loss:   8.059 ( 8.263)  Acc@1:   3.538 (  2.980)  Acc@5:   7.547 (  7.314)
Train: 140 [   0/156 (  1%)]  Loss: 3.50 (3.50)  Time: 1.576s,  649.75/s  (1.576s,  649.75/s)  LR: 4.370e-03  Data: 1.198 (1.198)
Train: 140 [  50/156 ( 33%)]  Loss: 3.45 (3.49)  Time: 0.413s, 2479.90/s  (0.434s, 2359.58/s)  LR: 4.370e-03  Data: 0.028 (0.051)
Train: 140 [ 100/156 ( 65%)]  Loss: 3.53 (3.49)  Time: 0.409s, 2506.70/s  (0.422s, 2428.89/s)  LR: 4.370e-03  Data: 0.025 (0.039)
Train: 140 [ 150/156 ( 97%)]  Loss: 3.57 (3.50)  Time: 0.410s, 2500.07/s  (0.418s, 2451.10/s)  LR: 4.370e-03  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.471 (1.471)  Loss:   8.063 ( 8.063)  Acc@1:   3.223 (  3.223)  Acc@5:   7.715 (  7.715)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   8.049 ( 8.238)  Acc@1:   3.538 (  2.960)  Acc@5:   8.019 (  7.368)
Train: 141 [   0/156 (  1%)]  Loss: 3.44 (3.44)  Time: 1.566s,  653.73/s  (1.566s,  653.73/s)  LR: 3.543e-03  Data: 1.189 (1.189)
Train: 141 [  50/156 ( 33%)]  Loss: 3.49 (3.48)  Time: 0.410s, 2498.73/s  (0.435s, 2353.94/s)  LR: 3.543e-03  Data: 0.028 (0.050)
Train: 141 [ 100/156 ( 65%)]  Loss: 3.51 (3.48)  Time: 0.407s, 2513.48/s  (0.423s, 2423.56/s)  LR: 3.543e-03  Data: 0.028 (0.039)
Train: 141 [ 150/156 ( 97%)]  Loss: 3.47 (3.49)  Time: 0.410s, 2497.29/s  (0.418s, 2450.35/s)  LR: 3.543e-03  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.489 (1.489)  Loss:   8.056 ( 8.056)  Acc@1:   3.223 (  3.223)  Acc@5:   7.617 (  7.617)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   8.041 ( 8.245)  Acc@1:   2.948 (  2.950)  Acc@5:   8.255 (  7.460)
Train: 142 [   0/156 (  1%)]  Loss: 3.57 (3.57)  Time: 1.810s,  565.81/s  (1.810s,  565.81/s)  LR: 2.801e-03  Data: 1.433 (1.433)
Train: 142 [  50/156 ( 33%)]  Loss: 3.33 (3.48)  Time: 0.407s, 2514.33/s  (0.437s, 2340.83/s)  LR: 2.801e-03  Data: 0.027 (0.054)
Train: 142 [ 100/156 ( 65%)]  Loss: 3.48 (3.48)  Time: 0.409s, 2504.46/s  (0.423s, 2418.24/s)  LR: 2.801e-03  Data: 0.027 (0.041)
Train: 142 [ 150/156 ( 97%)]  Loss: 3.46 (3.48)  Time: 0.407s, 2513.33/s  (0.419s, 2441.91/s)  LR: 2.801e-03  Data: 0.026 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.457 (1.457)  Loss:   8.050 ( 8.050)  Acc@1:   3.125 (  3.125)  Acc@5:   7.227 (  7.227)
Test: [  48/48]  Time: 0.091 (0.330)  Loss:   8.012 ( 8.217)  Acc@1:   3.302 (  2.968)  Acc@5:   7.429 (  7.274)
Train: 143 [   0/156 (  1%)]  Loss: 3.41 (3.41)  Time: 1.597s,  641.28/s  (1.597s,  641.28/s)  LR: 2.146e-03  Data: 1.220 (1.220)
Train: 143 [  50/156 ( 33%)]  Loss: 3.43 (3.46)  Time: 0.411s, 2492.20/s  (0.434s, 2361.39/s)  LR: 2.146e-03  Data: 0.027 (0.051)
Train: 143 [ 100/156 ( 65%)]  Loss: 3.45 (3.46)  Time: 0.408s, 2507.07/s  (0.422s, 2426.22/s)  LR: 2.146e-03  Data: 0.027 (0.039)
Train: 143 [ 150/156 ( 97%)]  Loss: 3.42 (3.47)  Time: 0.408s, 2509.99/s  (0.417s, 2453.14/s)  LR: 2.146e-03  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.471 (1.471)  Loss:   8.080 ( 8.080)  Acc@1:   3.027 (  3.027)  Acc@5:   7.812 (  7.812)
Test: [  48/48]  Time: 0.091 (0.331)  Loss:   8.038 ( 8.237)  Acc@1:   3.066 (  2.952)  Acc@5:   7.665 (  7.342)
Train: 144 [   0/156 (  1%)]  Loss: 3.48 (3.48)  Time: 1.564s,  654.69/s  (1.564s,  654.69/s)  LR: 1.577e-03  Data: 1.188 (1.188)
Train: 144 [  50/156 ( 33%)]  Loss: 3.41 (3.46)  Time: 0.408s, 2511.52/s  (0.435s, 2354.07/s)  LR: 1.577e-03  Data: 0.026 (0.050)
Train: 144 [ 100/156 ( 65%)]  Loss: 3.48 (3.46)  Time: 0.404s, 2536.29/s  (0.421s, 2429.94/s)  LR: 1.577e-03  Data: 0.027 (0.039)
Train: 144 [ 150/156 ( 97%)]  Loss: 3.40 (3.46)  Time: 0.401s, 2552.82/s  (0.416s, 2463.24/s)  LR: 1.577e-03  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.456 (1.456)  Loss:   8.061 ( 8.061)  Acc@1:   3.223 (  3.223)  Acc@5:   8.008 (  8.008)
Test: [  48/48]  Time: 0.090 (0.330)  Loss:   7.999 ( 8.222)  Acc@1:   2.830 (  2.990)  Acc@5:   7.901 (  7.454)
Train: 145 [   0/156 (  1%)]  Loss: 3.42 (3.42)  Time: 1.530s,  669.07/s  (1.530s,  669.07/s)  LR: 1.096e-03  Data: 1.160 (1.160)
Train: 145 [  50/156 ( 33%)]  Loss: 3.35 (3.45)  Time: 0.403s, 2540.02/s  (0.427s, 2400.47/s)  LR: 1.096e-03  Data: 0.026 (0.050)
Train: 145 [ 100/156 ( 65%)]  Loss: 3.45 (3.45)  Time: 0.407s, 2513.03/s  (0.417s, 2455.42/s)  LR: 1.096e-03  Data: 0.026 (0.039)
Train: 145 [ 150/156 ( 97%)]  Loss: 3.43 (3.46)  Time: 0.409s, 2504.85/s  (0.415s, 2467.50/s)  LR: 1.096e-03  Data: 0.025 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.439 (1.439)  Loss:   8.080 ( 8.080)  Acc@1:   3.223 (  3.223)  Acc@5:   8.008 (  8.008)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   8.036 ( 8.255)  Acc@1:   3.184 (  2.976)  Acc@5:   7.665 (  7.340)
Train: 146 [   0/156 (  1%)]  Loss: 3.43 (3.43)  Time: 1.785s,  573.57/s  (1.785s,  573.57/s)  LR: 7.014e-04  Data: 1.158 (1.158)
Train: 146 [  50/156 ( 33%)]  Loss: 3.40 (3.45)  Time: 0.412s, 2485.31/s  (0.438s, 2340.02/s)  LR: 7.014e-04  Data: 0.027 (0.049)
Train: 146 [ 100/156 ( 65%)]  Loss: 3.39 (3.46)  Time: 0.414s, 2472.51/s  (0.425s, 2411.12/s)  LR: 7.014e-04  Data: 0.028 (0.038)
Train: 146 [ 150/156 ( 97%)]  Loss: 3.48 (3.45)  Time: 0.408s, 2506.82/s  (0.421s, 2435.01/s)  LR: 7.014e-04  Data: 0.026 (0.034)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.448 (1.448)  Loss:   8.051 ( 8.051)  Acc@1:   3.125 (  3.125)  Acc@5:   8.105 (  8.105)
Test: [  48/48]  Time: 0.091 (0.329)  Loss:   8.009 ( 8.217)  Acc@1:   3.066 (  2.968)  Acc@5:   7.783 (  7.376)
Train: 147 [   0/156 (  1%)]  Loss: 3.47 (3.47)  Time: 1.715s,  597.15/s  (1.715s,  597.15/s)  LR: 3.947e-04  Data: 1.338 (1.338)
Train: 147 [  50/156 ( 33%)]  Loss: 3.44 (3.46)  Time: 0.414s, 2472.94/s  (0.437s, 2345.49/s)  LR: 3.947e-04  Data: 0.028 (0.054)
Train: 147 [ 100/156 ( 65%)]  Loss: 3.48 (3.44)  Time: 0.411s, 2493.42/s  (0.424s, 2413.61/s)  LR: 3.947e-04  Data: 0.029 (0.041)
Train: 147 [ 150/156 ( 97%)]  Loss: 3.43 (3.44)  Time: 0.407s, 2518.64/s  (0.420s, 2436.49/s)  LR: 3.947e-04  Data: 0.025 (0.036)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.437 (1.437)  Loss:   8.046 ( 8.046)  Acc@1:   3.223 (  3.223)  Acc@5:   8.008 (  8.008)
Test: [  48/48]  Time: 0.089 (0.330)  Loss:   8.012 ( 8.216)  Acc@1:   3.184 (  2.974)  Acc@5:   8.019 (  7.370)
Train: 148 [   0/156 (  1%)]  Loss: 3.44 (3.44)  Time: 1.643s,  623.18/s  (1.643s,  623.18/s)  LR: 1.754e-04  Data: 1.274 (1.274)
Train: 148 [  50/156 ( 33%)]  Loss: 3.44 (3.44)  Time: 0.401s, 2555.65/s  (0.429s, 2387.54/s)  LR: 1.754e-04  Data: 0.027 (0.055)
Train: 148 [ 100/156 ( 65%)]  Loss: 3.43 (3.44)  Time: 0.400s, 2561.35/s  (0.415s, 2466.25/s)  LR: 1.754e-04  Data: 0.026 (0.042)
Train: 148 [ 150/156 ( 97%)]  Loss: 3.50 (3.44)  Time: 0.399s, 2567.03/s  (0.411s, 2492.50/s)  LR: 1.754e-04  Data: 0.024 (0.037)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.444 (1.444)  Loss:   8.054 ( 8.054)  Acc@1:   3.125 (  3.125)  Acc@5:   8.105 (  8.105)
Test: [  48/48]  Time: 0.091 (0.328)  Loss:   8.025 ( 8.221)  Acc@1:   2.830 (  2.946)  Acc@5:   7.665 (  7.356)
Train: 149 [   0/156 (  1%)]  Loss: 3.32 (3.32)  Time: 1.928s,  531.21/s  (1.928s,  531.21/s)  LR: 4.386e-05  Data: 1.265 (1.265)
Train: 149 [  50/156 ( 33%)]  Loss: 3.55 (3.45)  Time: 0.409s, 2506.05/s  (0.437s, 2345.88/s)  LR: 4.386e-05  Data: 0.026 (0.051)
Train: 149 [ 100/156 ( 65%)]  Loss: 3.53 (3.45)  Time: 0.411s, 2492.83/s  (0.423s, 2420.46/s)  LR: 4.386e-05  Data: 0.026 (0.039)
Train: 149 [ 150/156 ( 97%)]  Loss: 3.47 (3.45)  Time: 0.411s, 2492.45/s  (0.419s, 2442.40/s)  LR: 4.386e-05  Data: 0.026 (0.035)
Distributing BatchNorm running means and vars
Test: [   0/48]  Time: 1.433 (1.433)  Loss:   8.067 ( 8.067)  Acc@1:   3.223 (  3.223)  Acc@5:   8.105 (  8.105)
Test: [  48/48]  Time: 0.092 (0.330)  Loss:   8.038 ( 8.238)  Acc@1:   3.066 (  2.964)  Acc@5:   7.665 (  7.356)
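
Note on the recurring "Distributing BatchNorm running means and vars" lines: after each training epoch and before evaluation, the BatchNorm running statistics are synchronized across the distributed ranks so every rank tests with the same normalization state. A minimal sketch of what such a step typically does (assuming a timm-style --dist-bn reduce mode; the helper name and exact reduction below are illustrative, not this script's source):

    import torch
    import torch.distributed as dist

    def distribute_bn_stats(model: torch.nn.Module, world_size: int, reduce: bool = True):
        # Sync BatchNorm running stats across ranks before eval
        # (illustrative re-implementation, assumes an initialized process group).
        for module in model.modules():
            if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
                for stat in (module.running_mean, module.running_var):
                    if reduce:
                        # Average the running stats over all ranks.
                        dist.all_reduce(stat, op=dist.ReduceOp.SUM)
                        stat /= world_size
                    else:
                        # Alternatively, copy rank 0's stats everywhere.
                        dist.broadcast(stat, src=0)
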
*** Best metric: 5.61399999710083 (epoch 62)
--result
[
    {
        "epoch": 60,
        "train": {
            "loss": 6.298423767089844
        },
        "validation": {
            "loss": 6.257286530914307,
            "top1": 5.347999996948242,
            "top5": 13.058000010986328
        }
    },
    {
        "epoch": 64,
        "train": {
            "loss": 6.195786476135254
        },
        "validation": {
            "loss": 6.312929144134522,
            "top1": 5.400000004882813,
            "top5": 12.26599998413086
        }
    },
    {
        "epoch": 54,
        "train": {
            "loss": 6.431514263153076
        },
        "validation": {
            "loss": 6.243704882965088,
            "top1": 5.446000002746582,
            "top5": 12.990000000915527
        }
    },
    {
        "epoch": 65,
        "train": {
            "loss": 6.167807579040527
        },
        "validation": {
            "loss": 6.294409734191895,
            "top1": 5.461999999694824,
            "top5": 12.33400001373291
        }
    },
    {
        "epoch": 53,
        "train": {
            "loss": 6.453994274139404
        },
        "validation": {
            "loss": 6.213420800628662,
            "top1": 5.4739999940490724,
            "top5": 13.238000000610352
        }
    },
    {
        "epoch": 59,
        "train": {
            "loss": 6.324529647827148
        },
        "validation": {
            "loss": 6.24187628326416,
            "top1": 5.536000004882813,
            "top5": 12.928000008544922
        }
    },
    {
        "epoch": 56,
        "train": {
            "loss": 6.389721870422363
        },
        "validation": {
            "loss": 6.212430068511963,
            "top1": 5.562000007934571,
            "top5": 13.226000016479492
        }
    },
    {
        "epoch": 55,
        "train": {
            "loss": 6.411748886108398
        },
        "validation": {
            "loss": 6.232906625976563,
            "top1": 5.589999999694824,
            "top5": 13.177999992980958
        }
    },
    {
        "epoch": 61,
        "train": {
            "loss": 6.276514530181885
        },
        "validation": {
            "loss": 6.255700014038086,
            "top1": 5.596,
            "top5": 12.852000014038087
        }
    },
    {
        "epoch": 62,
        "train": {
            "loss": 6.248862266540527
        },
        "validation": {
            "loss": 6.251430170135498,
            "top1": 5.61399999710083,
            "top5": 12.980000000305175
        }
    }
]
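
The --result block above is the run's machine-readable summary: the top epochs ranked by validation top-1, each carrying train loss plus validation loss/top1/top5 (epoch 62's top1 of 5.614 matches the "*** Best metric" line). A minimal sketch of recovering the best entry from such a dump, assuming the JSON array has been saved to a hypothetical results.json:

    import json

    # Load the JSON array printed after "--result".
    with open("results.json") as f:
        results = json.load(f)

    # Pick the entry with the highest validation top-1 accuracy.
    best = max(results, key=lambda r: r["validation"]["top1"])
    print(f"Best metric: {best['validation']['top1']} (epoch {best['epoch']})")

Run against the summary above, this prints: Best metric: 5.61399999710083 (epoch 62)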