18
18
package org .apache .hadoop .hdfs ;
19
19
20
20
import static org .apache .hadoop .hdfs .client .HdfsClientConfigKeys .DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY ;
21
- import static org .junit .Assert .assertEquals ;
22
- import static org .junit .Assert .assertTrue ;
23
- import static org .junit .Assert .fail ;
21
+ import static org .junit .jupiter . api . Assertions .assertEquals ;
22
+ import static org .junit .jupiter . api . Assertions .assertTrue ;
23
+ import static org .junit .jupiter . api . Assertions .fail ;
24
24
25
25
import java .io .EOFException ;
26
26
import java .io .IOException ;
53
53
import org .apache .hadoop .hdfs .server .datanode .SimulatedFSDataset ;
54
54
import org .apache .hadoop .io .IOUtils ;
55
55
import org .apache .hadoop .test .GenericTestUtils ;
56
- import org .junit .Assert ;
57
- import org .junit .Before ;
58
- import org .junit .BeforeClass ;
59
- import org .junit .Test ;
56
+ import org .junit .jupiter .api .Assertions ;
57
+ import org .junit .jupiter .api .BeforeEach ;
58
+ import org .junit .jupiter .api .BeforeAll ;
59
+ import org .junit .jupiter .api .Test ;
60
+ import org .junit .jupiter .api .Timeout ;
60
61
import org .mockito .Mockito ;
61
62
import org .mockito .invocation .InvocationOnMock ;
62
63
import org .mockito .stubbing .Answer ;
@@ -82,11 +83,13 @@ public class TestPread {
82
83
LoggerFactory .getLogger (TestPread .class .getName ());
83
84
private final GenericTestUtils .LogCapturer dfsClientLog =
84
85
GenericTestUtils .LogCapturer .captureLogs (DFSClient .LOG );
85
/**
 * Raises the {@code DFSClient} logger threshold to WARN once for the whole
 * test class — presumably so the {@code dfsClientLog} LogCapturer assertions
 * only see the WARN-level retry/failure messages the tests look for, rather
 * than routine INFO output (NOTE(review): confirm against the log-asserting
 * tests below).
 */
@BeforeAll
public static void setLogLevel() {
  GenericTestUtils.setLogLevel(DFSClient.LOG, org.apache.log4j.Level.WARN);
}
89
- @ Before
91
+
92
+ @ BeforeEach
90
93
public void setup () {
91
94
simulatedStorage = false ;
92
95
isHedgedRead = false ;
@@ -107,10 +110,10 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException {
107
110
// should throw an exception
108
111
res = e ;
109
112
}
110
- assertTrue ("Error reading beyond file boundary." , res != null );
113
+ assertTrue (res != null , "Error reading beyond file boundary." );
111
114
in .close ();
112
115
if (!fileSys .delete (name , true ))
113
- assertTrue ("Cannot delete file" , false );
116
+ assertTrue (false , "Cannot delete file" );
114
117
115
118
// now create the real file
116
119
DFSTestUtil .createFile (fileSys , name , fileSize , fileSize ,
@@ -119,9 +122,9 @@ private void writeFile(FileSystem fileSys, Path name) throws IOException {
119
122
120
123
private void checkAndEraseData (byte [] actual , int from , byte [] expected , String message ) {
121
124
for (int idx = 0 ; idx < actual .length ; idx ++) {
122
- assertEquals (message + " byte " +( from + idx )+ " differs. expected " +
123
- expected [ from + idx ]+ " actual " + actual [ idx ],
124
- actual [ idx ], expected [ from + idx ]);
125
+ assertEquals (actual [ idx ], expected [ from + idx ],
126
+ message + " byte " + ( from + idx )
127
+ + " differs. expected " + expected [ from + idx ] + " actual " + actual [ idx ]);
125
128
actual [idx ] = 0 ;
126
129
}
127
130
}
@@ -140,17 +143,17 @@ private void doPread(FSDataInputStream stm, long position, byte[] buffer,
140
143
while (nread < length ) {
141
144
int nbytes =
142
145
stm .read (position + nread , buffer , offset + nread , length - nread );
143
- assertTrue ("Error in pread" , nbytes > 0 );
146
+ assertTrue (nbytes > 0 , "Error in pread" );
144
147
nread += nbytes ;
145
148
}
146
149
147
150
if (dfstm != null ) {
148
151
if (isHedgedRead ) {
149
- assertTrue ("Expected read statistic to be incremented" , length <= dfstm
150
- . getReadStatistics (). getTotalBytesRead () - totalRead );
152
+ assertTrue (length <= dfstm . getReadStatistics (). getTotalBytesRead () - totalRead ,
153
+ "Expected read statistic to be incremented" );
151
154
} else {
152
- assertEquals ("Expected read statistic to be incremented" , length , dfstm
153
- . getReadStatistics (). getTotalBytesRead () - totalRead );
155
+ assertEquals (length , dfstm . getReadStatistics (). getTotalBytesRead () - totalRead ,
156
+ "Expected read statistic to be incremented" );
154
157
}
155
158
}
156
159
}
@@ -221,7 +224,7 @@ private void pReadFile(FileSystem fileSys, Path name) throws IOException {
221
224
// should throw an exception
222
225
res = e ;
223
226
}
224
- assertTrue ("Error reading beyond file boundary." , res != null );
227
+ assertTrue (res != null , "Error reading beyond file boundary." );
225
228
226
229
stm .close ();
227
230
}
@@ -553,9 +556,9 @@ public Void call() throws IOException {
553
556
});
554
557
try {
555
558
future .get (4 , TimeUnit .SECONDS );
556
- Assert .fail ();
559
+ Assertions .fail ();
557
560
} catch (ExecutionException ee ) {
558
- assertTrue (ee .toString () , ee .getCause () instanceof EOFException );
561
+ assertTrue (ee .getCause () instanceof EOFException , ee .toString () );
559
562
} finally {
560
563
future .cancel (true );
561
564
executor .shutdown ();
@@ -570,7 +573,8 @@ public Void call() throws IOException {
570
573
* retrying on a different datanode or by refreshing data nodes and retrying each data node one
571
574
* more time.
572
575
*/
573
- @ Test (timeout =120000 )
576
+ @ Test
577
+ @ Timeout (value = 120 )
574
578
public void testGetFromOneDataNodeExceptionLogging () throws IOException {
575
579
// With maxBlockAcquireFailures = 0, we would try on each datanode only once and if
576
580
// we fail on all three datanodes, we fail the read request.
@@ -647,7 +651,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
647
651
/**
648
652
* Test the case where we always hit IOExceptions, causing the read request to fail.
649
653
*/
650
- @ Test (timeout =60000 )
654
+ @ Test
655
+ @ Timeout (value = 60 )
651
656
public void testFetchFromDataNodeExceptionLoggingFailedRequest ()
652
657
throws IOException {
653
658
testFetchFromDataNodeExceptionLoggingFailedRequest (0 );
@@ -723,7 +728,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
723
728
}
724
729
}
725
730
726
- @ Test (timeout =30000 )
731
+ @ Test
732
+ @ Timeout (value = 30 )
727
733
public void testHedgedReadFromAllDNFailed () throws IOException {
728
734
Configuration conf = new Configuration ();
729
735
int numHedgedReadPoolThreads = 5 ;
@@ -768,7 +774,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
768
774
byte [] buffer = new byte [64 * 1024 ];
769
775
input = dfsClient .open (filename );
770
776
input .read (0 , buffer , 0 , 1024 );
771
- Assert .fail ("Reading the block should have thrown BlockMissingException" );
777
+ Assertions .fail ("Reading the block should have thrown BlockMissingException" );
772
778
} catch (BlockMissingException e ) {
773
779
// The result of 9 is due to 2 blocks by 4 iterations plus one because
774
780
// hedgedReadOpsLoopNumForTesting is incremented at start of the loop.
@@ -808,7 +814,8 @@ public void testPreadFailureWithChangedBlockLocations() throws Exception {
808
814
* 7. Consider next calls to getBlockLocations() always returns DN3 as last
809
815
* location.<br>
810
816
*/
811
- @ Test (timeout = 60000 )
817
+ @ Test
818
+ @ Timeout (value = 60 )
812
819
public void testPreadHedgedFailureWithChangedBlockLocations ()
813
820
throws Exception {
814
821
isHedgedRead = true ;
@@ -929,10 +936,10 @@ public Boolean get() {
929
936
byte [] buf = new byte [1024 ];
930
937
int n = din .read (0 , buf , 0 , data .length ());
931
938
assertEquals (data .length (), n );
932
- assertEquals ("Data should be read" , data , new String (buf , 0 , n ));
933
- assertTrue ("Read should complete with maximum " + maxFailures
934
- + " failures, but completed with " + din . failures ,
935
- din . failures <= maxFailures );
939
+ assertEquals (data , new String (buf , 0 , n ), "Data should be read" );
940
+ assertTrue (din . failures <= maxFailures ,
941
+ "Read should complete with maximum " + maxFailures
942
+ + " failures, but completed with " + din . failures );
936
943
DFSClient .LOG .info ("Read completed" );
937
944
}
938
945
}
0 commit comments