
Commit 4dc4e04

Fixing CI for pytorch version tests (#3130)
* Fixed mypy issues
* Revert changes for visdom
* Fixing CI for pytorch version tests
* Fix code formatting
1 parent 30ceaf2 commit 4dc4e04
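This commit gates the distributed metric tests on the installed PyTorch version: each affected test module now imports Version from packaging.version and the tests are skipped when torch.__version__ is older than 1.7.0, so CI jobs running older PyTorch builds no longer fail on them. A minimal sketch of the pattern used across the three files (the test name and body below are hypothetical placeholders, not code from this commit):

import pytest
import torch
from packaging.version import Version


@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
def test_needs_torch_1_7():
    # Hypothetical body: this only runs when torch >= 1.7.0 is installed.
    assert Version(torch.__version__) >= Version("1.7.0")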

File tree

3 files changed: +9 -0 lines changed


tests/ignite/metrics/test_accuracy.py

Lines changed: 3 additions & 0 deletions
@@ -2,6 +2,7 @@
 
 import pytest
 import torch
+from packaging.version import Version
 from sklearn.metrics import accuracy_score
 
 import ignite.distributed as idist
@@ -550,6 +551,7 @@ def update(_, i):
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
     device = idist.device()
     _test_distrib_multilabel_input_NHW(device)
@@ -561,6 +563,7 @@ def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
 
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
     device = idist.device()
     _test_distrib_multilabel_input_NHW(device)

tests/ignite/metrics/test_classification_report.py

Lines changed: 3 additions & 0 deletions
@@ -3,6 +3,7 @@
 
 import pytest
 import torch
+from packaging.version import Version
 
 import ignite.distributed as idist
 from ignite.engine import Engine
@@ -161,6 +162,7 @@ def update(engine, i):
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
     device = idist.device()
     _test_integration_multiclass(device, True)
@@ -171,6 +173,7 @@ def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
 
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_gloo_cpu_or_gpu(local_rank, distributed_context_single_node_gloo):
     device = idist.device()
     _test_integration_multiclass(device, True)

tests/ignite/metrics/test_metric.py

Lines changed: 3 additions & 0 deletions
@@ -6,6 +6,7 @@
 import numpy as np
 import pytest
 import torch
+from packaging.version import Version
 from pytest import approx, raises
 from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
 
@@ -710,6 +711,7 @@ def _test_creating_on_xla_fails(device):
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
 @pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
     device = idist.device()
     _test_distrib_sync_all_reduce_decorator(device)
@@ -722,6 +724,7 @@ def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
 
 @pytest.mark.distributed
 @pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
+@pytest.mark.skipif(Version(torch.__version__) < Version("1.7.0"), reason="Skip if < 1.7.0")
 def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
     device = idist.device()
     _test_distrib_sync_all_reduce_decorator(device)
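A side note on why packaging.version is used for the guard rather than a plain string comparison (this rationale is not stated in the diff itself): torch.__version__ can carry a local suffix such as a CUDA tag, and version strings do not order correctly when compared as plain strings. A small illustrative check, using a hypothetical version string:

from packaging.version import Version

# Hypothetical build string of the kind torch.__version__ may return.
installed = Version("1.6.0+cu101")

# packaging.version parses the numeric release components, so 1.6.0+cu101
# is correctly treated as older than 1.7.0 and the skip condition triggers.
assert installed < Version("1.7.0")

# Plain string comparison is lexicographic and breaks on two-digit components:
assert "1.10.0" < "1.7.0"                      # wrong ordering as strings
assert Version("1.10.0") > Version("1.7.0")    # correct ordering as versions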

0 commit comments
