@@ -39,7 +39,6 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
-import org.apache.hadoop.util.Sets;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -57,6 +56,8 @@
import java.net.URL;
import java.net.URLDecoder;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
@@ -120,7 +121,7 @@ public Set<String> getGroupsSet(String user) throws IOException {
LOG.info("Getting groups in MockUnixGroupsMapping");
String g1 = user + (10 * i + 1);
String g2 = user + (10 * i + 2);
-Set<String> s = Sets.newHashSet(g1, g2);
+Set<String> s = new HashSet<>(Arrays.asList(g1, g2));
i++;
return s;
}
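Note on the pattern above: `new HashSet<>(Arrays.asList(g1, g2))` is a drop-in for the removed `Sets.newHashSet(g1, g2)`; both yield a mutable HashSet seeded with the given elements. A minimal standalone sketch (class name and values are illustrative, not part of this change):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class SeededHashSetSketch {
      public static void main(String[] args) {
        // Mutable set seeded from listed elements, as in the new test code.
        Set<String> s = new HashSet<>(Arrays.asList("user11", "user12"));
        s.add("user13"); // still mutable, unlike Java 9+ Set.of(...)
        System.out.println(s); // e.g. [user11, user12, user13]
        // Set.of("user11", "user12") would be shorter, but it is immutable
        // and throws on duplicate elements, so it is not a drop-in here.
      }
    }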
[next file]
@@ -76,7 +76,6 @@
import org.apache.hadoop.net.DomainNameResolverFactory;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Lists;
-import org.apache.hadoop.util.Sets;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -725,8 +724,9 @@ private static Collection<String> getParentNameServices(Configuration conf)
} else {
// Ensure that the internal service is indeed in the list of all available
// nameservices.
-Set<String> availableNameServices = Sets.newHashSet(conf
-.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
+Collection<String> namespaces = conf
+.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
+Set<String> availableNameServices = new HashSet<>(namespaces);
for (String nsId : parentNameServices) {
if (!availableNameServices.contains(nsId)) {
throw new IOException("Unknown nameservice: " + nsId);
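For the hunk above, splitting out `namespaces` and copying it into a `HashSet` keeps each membership check in the validation loop at O(1). A hedged sketch of the lookup pattern (configuration access is simulated; names are illustrative):

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.HashSet;
    import java.util.Set;

    public class NameserviceLookupSketch {
      public static void main(String[] args) {
        // Stand-in for conf.getTrimmedStringCollection(DFS_NAMESERVICES).
        Collection<String> namespaces = Arrays.asList("ns1", "ns2");
        // Copying into a HashSet gives O(1) contains(), regardless of the
        // collection type the Configuration getter actually returns.
        Set<String> availableNameServices = new HashSet<>(namespaces);
        for (String nsId : Arrays.asList("ns1", "ns3")) {
          System.out.println(nsId + " known? "
              + availableNameServices.contains(nsId));
        }
      }
    }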
[next file]
@@ -39,7 +39,6 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.Lists;
-import org.apache.hadoop.util.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -51,6 +50,7 @@
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
@@ -273,7 +273,7 @@ private List<InetSocketAddress> getOtherJournalNodeAddrs() {
}

if (uriStr == null || uriStr.isEmpty()) {
-HashSet<String> sharedEditsUri = Sets.newHashSet();
+HashSet<String> sharedEditsUri = new HashSet<>();
if (nameServiceId != null) {
Collection<String> nnIds = DFSUtilClient.getNameNodeIds(
conf, nameServiceId);
@@ -315,7 +315,7 @@ private List<InetSocketAddress> getJournalAddrList(String uriStr) throws
IOException {
URI uri = new URI(uriStr);
return Util.getLoggerAddresses(uri,
-Sets.newHashSet(jn.getBoundIpcAddress()), conf);
+new HashSet<>(Arrays.asList(jn.getBoundIpcAddress())), conf);
}

private void getMissingLogSegments(List<RemoteEditLog> thisJournalEditLogs,
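In the last hunk, a one-element set is built with `new HashSet<>(Arrays.asList(...))`, mirroring the removed `Sets.newHashSet(...)`. If the callee never mutates the set, `Collections.singleton` is a leaner alternative; whether `Util.getLoggerAddresses` mutates its argument is not shown in this diff, so that substitution is only an assumption. Sketch:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    public class SingleElementSetSketch {
      public static void main(String[] args) {
        String boundAddr = "127.0.0.1:8485"; // stand-in for jn.getBoundIpcAddress()
        // Mutable single-element set, matching the replacement in this PR.
        Set<String> mutable = new HashSet<>(Arrays.asList(boundAddr));
        // Immutable alternative; safe only if the receiver treats it as read-only.
        Set<String> immutable = Collections.singleton(boundAddr);
        System.out.println(mutable.equals(immutable)); // true: set equality ignores mutability
      }
    }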
[next file]
@@ -40,6 +40,7 @@
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -143,11 +144,11 @@ void writeUnlock() {
void refreshNNList(String serviceId, List<String> nnIds,
ArrayList<InetSocketAddress> addrs,
ArrayList<InetSocketAddress> lifelineAddrs) throws IOException {
-Set<InetSocketAddress> oldAddrs = Sets.newHashSet();
+Set<InetSocketAddress> oldAddrs = new HashSet<>();
for (BPServiceActor actor : bpServices) {
oldAddrs.add(actor.getNNSocketAddress());
}
-Set<InetSocketAddress> newAddrs = Sets.newHashSet(addrs);
+Set<InetSocketAddress> newAddrs = new HashSet<>(addrs);

// Process added NNs
Set<InetSocketAddress> addedNNs = Sets.difference(newAddrs, oldAddrs);
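Worth noting above: the inputs become plain `java.util.HashSet`s while `Sets.difference` is kept, and the two interoperate since the helper only needs the `Set` interface. If the helper were ever dropped too, a JDK-only equivalent exists; a sketch under that assumption:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class SetDifferenceSketch {
      public static void main(String[] args) {
        Set<String> oldAddrs = new HashSet<>(Arrays.asList("nn1:8020", "nn2:8020"));
        Set<String> newAddrs = new HashSet<>(Arrays.asList("nn2:8020", "nn3:8020"));
        // JDK-only difference: copy, then removeAll. Produces an independent
        // set, which may differ from helpers that return a live view.
        Set<String> addedNNs = new HashSet<>(newAddrs);
        addedNNs.removeAll(oldAddrs);
        System.out.println(addedNNs); // [nn3:8020]
      }
    }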
[next file]
@@ -196,8 +196,8 @@ private void doRefreshNamenodes(

// Step 2. Any nameservices we currently have but are no longer present
// need to be removed.
-toRemove = Sets.newHashSet(Sets.difference(
-bpByNameserviceId.keySet(), addrMap.keySet()));
+toRemove = Sets.difference(
+bpByNameserviceId.keySet(), addrMap.keySet());

assert toRefresh.size() + toAdd.size() ==
addrMap.size() :
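This hunk drops the defensive copy around `Sets.difference`, so `toRemove` now receives whatever that helper returns directly. Whether this is purely cosmetic depends on the helper's contract, which this diff does not show: Guava's `Sets.difference` returns an unmodifiable view backed by its arguments, whereas a materializing implementation returns an independent set (which contract `org.apache.hadoop.util.Sets` follows is an assumption to verify). If the view contract applies, `toRemove` must not be mutated and the backing key sets must not change while it is iterated. A sketch of why the copy/view distinction matters:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class CopyVersusViewSketch {
      public static void main(String[] args) {
        Set<String> current = new HashSet<>(Arrays.asList("ns1", "ns2"));
        Set<String> incoming = new HashSet<>(Arrays.asList("ns2"));

        // Copy semantics (the old code): snapshot taken once.
        Set<String> toRemove = new HashSet<>(current);
        toRemove.removeAll(incoming);

        current.add("ns3"); // backing set changes afterwards

        System.out.println(toRemove); // [ns1] -- the snapshot is unaffected
        // A live difference *view* would now also reflect ns3, and would
        // throw UnsupportedOperationException on add/remove.
      }
    }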
[next file]
@@ -121,7 +121,6 @@
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.Sets;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Timer;

@@ -404,11 +403,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
*/
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(
Collection<StorageLocation> dataLocations, DataStorage storage) {
-Set<StorageLocation> failedLocationSet = Sets.newHashSetWithExpectedSize(
-dataLocations.size());
-for (StorageLocation sl: dataLocations) {
-failedLocationSet.add(sl);
-}
+Set<StorageLocation> failedLocationSet = new HashSet<>(dataLocations);
for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
it.hasNext(); ) {
Storage.StorageDirectory sd = it.next();
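The five removed lines collapse into the `HashSet` copy constructor, which also presizes: `new HashSet<>(c)` allocates its table from `c.size()` before bulk-adding, so the `newHashSetWithExpectedSize` hint is not lost. Sketch with the element type simplified to String (the real code uses StorageLocation):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class CopyConstructorSketch {
      public static void main(String[] args) {
        List<String> dataLocations = Arrays.asList("/data/1", "/data/2", "/data/3");
        // One call replaces "presize, then add in a loop": the copy
        // constructor sizes itself from dataLocations.size().
        Set<String> failedLocationSet = new HashSet<>(dataLocations);
        System.out.println(failedLocationSet.size()); // 3
      }
    }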
[next file]
@@ -31,6 +31,7 @@
import java.util.Map;
import java.util.PriorityQueue;
import java.util.SortedSet;
+import java.util.TreeSet;
import java.util.concurrent.CopyOnWriteArrayList;

import org.slf4j.Logger;
@@ -42,7 +43,6 @@
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.util.Lists;
-import org.apache.hadoop.util.Sets;

import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.util.Preconditions;
@@ -677,7 +677,7 @@ public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) {
// storage directory with ancient logs. Clear out any logs we've
// accumulated so far, and then skip to the next segment of logs
// after the gap.
-SortedSet<Long> startTxIds = Sets.newTreeSet(logsByStartTxId.keySet());
+SortedSet<Long> startTxIds = new TreeSet<>(logsByStartTxId.keySet());
startTxIds = startTxIds.tailSet(curStartTxId);
if (startTxIds.isEmpty()) {
break;
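`new TreeSet<>(logsByStartTxId.keySet())` matches the removed `Sets.newTreeSet(...)`: both copy the keys into a sorted set so `tailSet` can jump past the gap. A compact sketch of that tailSet usage (transaction ids are made up):

    import java.util.Arrays;
    import java.util.SortedSet;
    import java.util.TreeSet;

    public class TailSetSketch {
      public static void main(String[] args) {
        SortedSet<Long> startTxIds = new TreeSet<>(Arrays.asList(1L, 101L, 5001L));
        long curStartTxId = 200L; // first txid after a gap in the logs
        // tailSet keeps only segments starting at or after curStartTxId.
        SortedSet<Long> remaining = startTxIds.tailSet(curStartTxId);
        System.out.println(remaining); // [5001]
      }
    }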
[next file]
@@ -95,6 +95,7 @@ public int getAttemptedItemsCount() {
@VisibleForTesting
public void updateAttemptedItemsCount() {
storagePolicySatisfier.getAttemptedItemsMonitor().getStorageMovementAttemptedItems()
-.add(new StoragePolicySatisfier.AttemptedItemInfo(0, 1, 1, new HashSet<>(), 1));
+.add(new StoragePolicySatisfier.AttemptedItemInfo(0, 1,
+1, new HashSet<>(), 1));
}
}
[next file]
@@ -71,6 +71,7 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -87,7 +88,6 @@
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.util.Lists;
-import org.apache.hadoop.util.Sets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -653,7 +653,7 @@ public DataNode getDatanode() {
private boolean federation;
private boolean checkExitOnShutdown = true;
protected final int storagesPerDatanode;
-private Set<FileSystem> fileSystems = Sets.newHashSet();
+private Set<FileSystem> fileSystems = new HashSet<>();

private List<long[]> storageCap = Lists.newLinkedList();

[next file]
@@ -53,6 +53,7 @@
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Collection;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -80,7 +81,6 @@
import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
-import org.apache.hadoop.util.Sets;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -1042,10 +1042,10 @@ public void testGetNNServiceRpcAddressesForNsIds() throws IOException {

{
Collection<String> internal = DFSUtil.getInternalNameServices(conf);
assertEquals(Sets.newHashSet("nn1"), internal);
assertEquals(new HashSet<>(Arrays.asList("nn1")), internal);

Collection<String> all = DFSUtilClient.getNameServiceIds(conf);
assertEquals(Sets.newHashSet("nn1", "nn2"), all);
assertEquals(new HashSet<>(Arrays.asList("nn1", "nn2")), all);
}

Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil
[next file]
@@ -26,14 +26,14 @@
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.net.Node;
-import org.apache.hadoop.util.Sets;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

+import java.util.Arrays;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
@@ -309,11 +309,11 @@ public void testChooseRandomWithStorageType() throws Exception {
// test the choose random can return desired storage type nodes without
// exclude
Set<String> diskUnderL1 =
Sets.newHashSet("host2", "host4", "host5", "host6");
Set<String> archiveUnderL1 = Sets.newHashSet("host1", "host3");
Set<String> ramdiskUnderL1 = Sets.newHashSet("host7");
Set<String> ssdUnderL1 = Sets.newHashSet("host8");
Set<String> nvdimmUnderL1 = Sets.newHashSet("host9");
new HashSet<>(Arrays.asList("host2", "host4", "host5", "host6"));
Set<String> archiveUnderL1 = new HashSet<>(Arrays.asList("host1", "host3"));
Set<String> ramdiskUnderL1 = new HashSet<>(Arrays.asList("host7"));
Set<String> ssdUnderL1 = new HashSet<>(Arrays.asList("host8"));
Set<String> nvdimmUnderL1 = new HashSet<>(Arrays.asList("host9"));
for (int i = 0; i < 10; i++) {
n = CLUSTER.chooseRandomWithStorageType("/l1", null, null,
StorageType.DISK);
@@ -396,7 +396,7 @@ public void testChooseRandomWithStorageTypeWithExcluded() throws Exception {
assertEquals("host6", dd.getHostName());
// exclude the host on r4 (since there is only one host, no randomness here)
excluded.add(n);
Set<String> expectedSet = Sets.newHashSet("host4", "host5");
Set<String> expectedSet = new HashSet<>(Arrays.asList("host4", "host5"));
for (int i = 0; i < 10; i++) {
// under l1, there are four hosts with DISK:
// /l1/d1/r1/host2, /l1/d1/r2/host4, /l1/d1/r2/host5 and /l1/d2/r3/host6
[next file]
@@ -34,6 +34,7 @@
import java.util.Map;
import java.util.Random;
import java.util.SortedSet;
+import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;

@@ -51,7 +52,6 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Sets;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
@@ -108,7 +108,7 @@ private static long determineMaxIpcNumber() throws Exception {
qjm.format(FAKE_NSINFO, false);
doWorkload(cluster, qjm);

-SortedSet<Integer> ipcCounts = Sets.newTreeSet();
+SortedSet<Integer> ipcCounts = new TreeSet<>();
for (AsyncLogger l : qjm.getLoggerSetForTests().getLoggersForTests()) {
InvocationCountingChannel ch = (InvocationCountingChannel)l;
ch.waitForAllPendingCalls();
Expand Down
[next file]
@@ -23,6 +23,7 @@

import java.io.IOException;
import java.net.InetSocketAddress;
+import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
@@ -73,13 +74,13 @@ public void testRefreshNamenodes() throws IOException {

// Ensure a BPOfferService in the datanodes corresponds to
// a namenode in the cluster
-Set<InetSocketAddress> nnAddrsFromCluster = Sets.newHashSet();
+Set<InetSocketAddress> nnAddrsFromCluster = new HashSet<>();
for (int i = 0; i < 4; i++) {
assertTrue(nnAddrsFromCluster.add(
cluster.getNameNode(i).getNameNodeAddress()));
}

-Set<InetSocketAddress> nnAddrsFromDN = Sets.newHashSet();
+Set<InetSocketAddress> nnAddrsFromDN = new HashSet<>();
for (BPOfferService bpos : dn.getAllBpOs()) {
for (BPServiceActor bpsa : bpos.getBPServiceActors()) {
assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress()));
[next file]
@@ -416,7 +416,7 @@ public static void assertFileContentsSame(File... files) throws Exception {
if (files.length < 2) return;

Map<File, String> md5s = getFileMD5s(files);
-if (Sets.newHashSet(md5s.values()).size() > 1) {
+if (new HashSet<>(md5s.values()).size() > 1) {
fail("File contents differed:\n " +
Joiner.on("\n ")
.withKeyValueSeparator("=")
@@ -433,7 +433,8 @@ public static void assertFileContentsDifferent(
File... files) throws Exception
{
Map<File, String> md5s = getFileMD5s(files);
-if (Sets.newHashSet(md5s.values()).size() != expectedUniqueHashes) {
+int uniqueHashes = new HashSet<>(md5s.values()).size();
+if (uniqueHashes != expectedUniqueHashes) {
fail("Expected " + expectedUniqueHashes + " different hashes, got:\n " +
Joiner.on("\n ")
.withKeyValueSeparator("=")
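Both assertions above count distinct file contents by set size: dumping `md5s.values()` into a `HashSet` collapses duplicates. A minimal sketch of the idiom (hashes shortened for readability):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;

    public class DistinctCountSketch {
      public static void main(String[] args) {
        Map<String, String> md5s = new HashMap<>();
        md5s.put("fileA", "d41d8c");
        md5s.put("fileB", "d41d8c"); // same contents as fileA
        md5s.put("fileC", "9e107d");
        int uniqueHashes = new HashSet<>(md5s.values()).size();
        System.out.println(uniqueHashes); // 2 distinct contents across 3 files
      }
    }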
[next file]
@@ -50,6 +50,7 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Random;
@@ -118,7 +119,6 @@
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Sets;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
@@ -386,15 +386,15 @@ public void testFsckMove() throws Exception {
cluster.getNameNodePort()), conf);
String[] fileNames = util.getFileNames(topDir);
CorruptedTestFile[] ctFiles = new CorruptedTestFile[]{
-new CorruptedTestFile(fileNames[0], Sets.newHashSet(0),
+new CorruptedTestFile(fileNames[0], new HashSet<>(Arrays.asList(0)),
dfsClient, numDatanodes, dfsBlockSize),
-new CorruptedTestFile(fileNames[1], Sets.newHashSet(2, 3),
+new CorruptedTestFile(fileNames[1], new HashSet<>(Arrays.asList(2, 3)),
dfsClient, numDatanodes, dfsBlockSize),
-new CorruptedTestFile(fileNames[2], Sets.newHashSet(4),
+new CorruptedTestFile(fileNames[2], new HashSet<>(Arrays.asList(4)),
dfsClient, numDatanodes, dfsBlockSize),
-new CorruptedTestFile(fileNames[3], Sets.newHashSet(0, 1, 2, 3),
+new CorruptedTestFile(fileNames[3], new HashSet<>(Arrays.asList(0, 1, 2, 3)),
dfsClient, numDatanodes, dfsBlockSize),
-new CorruptedTestFile(fileNames[4], Sets.newHashSet(1, 2, 3, 4),
+new CorruptedTestFile(fileNames[4], new HashSet<>(Arrays.asList(1, 2, 3, 4)),
dfsClient, numDatanodes, dfsBlockSize)
};
int totalMissingBlocks = 0;
@@ -2215,7 +2215,7 @@ public void testFsckMoveAfterCorruption() throws Exception {
new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
final String blockFileToCorrupt = fileNames[0];
final CorruptedTestFile ctf = new CorruptedTestFile(blockFileToCorrupt,
-Sets.newHashSet(0), dfsClient, numDatanodes, dfsBlockSize);
+new HashSet<>(Arrays.asList(0)), dfsClient, numDatanodes, dfsBlockSize);
ctf.corruptBlocks(cluster);

// Wait for fsck to discover all the missing blocks
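One subtlety in the replacements above: `Arrays.asList(0)` autoboxes its varargs to `List<Integer>`, so `new HashSet<>(Arrays.asList(0, 1, 2, 3))` reproduces `Sets.newHashSet(0, 1, 2, 3)` exactly. The well-known `Arrays.asList` pitfall only bites with a primitive array argument, which none of these call sites use. Sketch:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class BoxedIntSetSketch {
      public static void main(String[] args) {
        // Varargs of int literals box to Integer: a 4-element Set<Integer>.
        Set<Integer> blocksToCorrupt = new HashSet<>(Arrays.asList(0, 1, 2, 3));
        System.out.println(blocksToCorrupt.contains(2)); // true

        int[] primitive = {0, 1, 2, 3};
        // With a primitive array, Arrays.asList yields a single-element
        // List<int[]> -- the pitfall these call sites avoid by passing literals.
        System.out.println(Arrays.asList(primitive).size()); // 1
      }
    }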