
Commit 733d4f0

HIVE-28891: Fix spotbugs issues in hive-shims with spotbugs-maven-plugin 4.8.6.6 (#5776)
1 parent be59bd0 commit 733d4f0
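
Most of the changes below follow a small set of SpotBugs-driven patterns: mutable constructor arguments are defensively copied (the EI_EXPOSE_REP / EI_EXPOSE_REP2 patterns named in the added @SuppressFBWarnings annotations), constructors that performed fallible work are replaced by private constructors behind static createInstance factories, and inner classes that need no enclosing instance become static. A minimal sketch of the first two patterns, with hypothetical names (ShimSketch is not part of the commit):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

public final class ShimSketch {
  private final Configuration conf;

  // The constructor only stores state, so it cannot leave a half-built
  // object behind; the mutable argument is copied (EI_EXPOSE_REP2 fix).
  private ShimSketch(Configuration conf) {
    this.conf = new Configuration(conf);
  }

  // Fallible setup happens in the factory, before any instance exists.
  public static ShimSketch createInstance(Configuration conf) throws IOException {
    if (conf == null) {
      throw new IOException("configuration is required");
    }
    return new ShimSketch(conf);
  }
}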

File tree

15 files changed: +88 -75 lines

Jenkinsfile

Lines changed: 1 addition & 0 deletions
@@ -251,6 +251,7 @@ git merge origin/target
   }
   stage('Prechecks') {
     def spotbugsProjects = [
+        ":hive-shims",
         ":hive-storage-api",
         ":hive-service-rpc"
     ]

hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java

Lines changed: 1 addition & 1 deletion
@@ -803,7 +803,7 @@ private void registerPartitions(JobContext context) throws IOException{
     StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(),
         table.getParameters());

-    HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(conf, fs, tblPath);
+    HdfsUtils.HadoopFileStatus status = HdfsUtils.HadoopFileStatus.createInstance(conf, fs, tblPath);

     List<Partition> partitionsToAdd = new ArrayList<Partition>();
     if (!dynamicPartitioningUsed) {

itests/hive-unit/src/test/java/org/apache/hadoop/hive/io/TestHadoopFileStatus.java

Lines changed: 1 addition & 1 deletion
@@ -75,7 +75,7 @@ public void testHadoopFileStatusAclEntries() throws IOException {
     FileStatus mockFileStatus = Mockito.mock(FileStatus.class);
     Mockito.when(mockDfs.getAclStatus(mockPath)).thenReturn(aclStatus);
     Mockito.when(mockDfs.getFileStatus(mockPath)).thenReturn(mockFileStatus);
-    sourceStatus = new HadoopFileStatus(hiveConf, mockDfs, mockPath);
+    sourceStatus = HadoopFileStatus.createInstance(hiveConf, mockDfs, mockPath);
     Assert.assertNotNull(sourceStatus.getAclEntries());
     Assert.assertTrue(sourceStatus.getAclEntries().size() == 3);
     Iterables.removeIf(sourceStatus.getAclEntries(), new Predicate<AclEntry>() {

ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java

Lines changed: 1 addition & 1 deletion
@@ -796,7 +796,7 @@ private void truncateTempTable(org.apache.hadoop.hive.metastore.api.Table table)
       HadoopShims.HdfsEncryptionShim shim
           = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, conf);
       if (!shim.isPathEncrypted(location)) {
-        HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(conf, fs, location);
+        HdfsUtils.HadoopFileStatus status = HdfsUtils.HadoopFileStatus.createInstance(conf, fs, location);
         FileStatus targetStatus = fs.getFileStatus(location);
         String targetGroup = targetStatus == null ? null : targetStatus.getGroup();
         FileUtils.moveToTrash(fs, location, conf, isSkipTrash);

shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java

Lines changed: 25 additions & 22 deletions
@@ -288,7 +288,7 @@ public MiniMrShim getMiniMrCluster(Configuration conf, int numberOfTaskTrackers,
   /**
    * Shim for MiniMrCluster
    */
-  public class MiniMrShim implements HadoopShims.MiniMrShim {
+  public static class MiniMrShim implements HadoopShims.MiniMrShim {

    private final MiniMRCluster mr;
    private final Configuration conf;
@@ -300,7 +300,7 @@ public MiniMrShim() {

    public MiniMrShim(Configuration conf, int numberOfTaskTrackers,
        String nameNode, int numDir) throws IOException {
-      this.conf = conf;
+      this.conf = new Configuration(conf);

      JobConf jConf = new JobConf(conf);
      jConf.set("yarn.scheduler.capacity.root.queues", "default");
@@ -349,12 +349,12 @@ public HadoopShims.MiniMrShim getLocalMiniTezCluster(Configuration conf, boolean
    return new MiniTezLocalShim(conf, usingLlap);
  }

-  public class MiniTezLocalShim extends Hadoop23Shims.MiniMrShim {
+  public static class MiniTezLocalShim extends Hadoop23Shims.MiniMrShim {
    private final Configuration conf;
    private final boolean isLlap;

    public MiniTezLocalShim(Configuration conf, boolean usingLlap) {
-      this.conf = conf;
+      this.conf = new Configuration(conf);
      this.isLlap = usingLlap;
      setupConfiguration(conf);
    }
@@ -401,7 +401,7 @@ public MiniMrShim getMiniTezCluster(Configuration conf, int numberOfTaskTrackers
   /**
    * Shim for MiniTezCluster
    */
-  public class MiniTezShim extends Hadoop23Shims.MiniMrShim {
+  public static class MiniTezShim extends Hadoop23Shims.MiniMrShim {

    private final MiniTezCluster mr;
    private final Configuration conf;
@@ -553,6 +553,7 @@ private static void setKeyProvider(DFSClient dfsClient, KeyProviderCryptoExtensi
  public static class MiniDFSShim implements HadoopShims.MiniDFSShim {
    private final MiniDFSCluster cluster;

+    @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "intended")
    public MiniDFSShim(MiniDFSCluster cluster) {
      this.cluster = cluster;
    }
@@ -670,7 +671,7 @@ public boolean isFileInHDFS(FileSystem fs, Path path) throws IOException {
  }
  @Override
  public WebHCatJTShim getWebHCatShim(Configuration conf, UserGroupInformation ugi) throws IOException {
-    return new WebHCatJTShim23(conf, ugi);//this has state, so can't be cached
+    return WebHCatJTShim23.createInstance(conf, ugi);//this has state, so can't be cached
  }

  private static final class HdfsFileStatusWithIdImpl implements HdfsFileStatusWithId {
@@ -761,9 +762,6 @@ public void hflush(FSDataOutputStream stream) throws IOException {
  }

  static class ProxyFileSystem23 extends ProxyFileSystem {
-    public ProxyFileSystem23(FileSystem fs) {
-      super(fs);
-    }
    public ProxyFileSystem23(FileSystem fs, URI uri) {
      super(fs, uri);
    }
@@ -774,12 +772,11 @@ public FutureDataInputStreamBuilder openFile(Path path) throws IOException, Unsu
    }

    @Override
-    public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f)
-        throws FileNotFoundException, IOException {
+    public RemoteIterator<LocatedFileStatus> listLocatedStatus(final Path f) throws FileNotFoundException, IOException {
+      final RemoteIterator<LocatedFileStatus> remoteIterator =
+          ProxyFileSystem23.super.listLocatedStatus(ProxyFileSystem23.super.swizzleParamPath(f));
      return new RemoteIterator<LocatedFileStatus>() {
-        private final RemoteIterator<LocatedFileStatus> stats =
-            ProxyFileSystem23.super.listLocatedStatus(
-                ProxyFileSystem23.super.swizzleParamPath(f));
+        private final RemoteIterator<LocatedFileStatus> stats = remoteIterator;

        @Override
        public boolean hasNext() throws IOException {
@@ -789,8 +786,7 @@ public boolean hasNext() throws IOException {
        @Override
        public LocatedFileStatus next() throws IOException {
          LocatedFileStatus result = stats.next();
-          return new LocatedFileStatus(
-              ProxyFileSystem23.super.swizzleFileStatus(result, false),
+          return new LocatedFileStatus(ProxyFileSystem23.super.swizzleFileStatus(result, false),
              result.getBlockLocations());
        }
      };
@@ -1005,6 +1001,7 @@ public static class StoragePolicyShim implements HadoopShims.StoragePolicyShim {

    private final DistributedFileSystem dfs;

+    @SuppressFBWarnings(value = "EI_EXPOSE_REP2", justification = "intended")
    public StoragePolicyShim(DistributedFileSystem fs) {
      this.dfs = fs;
    }
@@ -1334,10 +1331,17 @@ public static class HdfsEncryptionShim implements HadoopShims.HdfsEncryptionShim

    private final Configuration conf;

-    public HdfsEncryptionShim(URI uri, Configuration conf) throws IOException {
+    public static HdfsEncryptionShim createInstance(URI uri, Configuration conf) throws IOException {
+      HdfsAdmin hadmin = new HdfsAdmin(uri, conf);
+      KeyProvider keyP = hadmin.getKeyProvider();
+      HdfsEncryptionShim hdfsEncryptionShim = new HdfsEncryptionShim(conf);
+      hdfsEncryptionShim.hdfsAdmin = hadmin;
+      hdfsEncryptionShim.keyProvider = keyP;
+      return hdfsEncryptionShim;
+    }
+
+    private HdfsEncryptionShim(Configuration conf) {
      this.conf = conf;
-      this.hdfsAdmin = new HdfsAdmin(uri, conf);
-      this.keyProvider = this.hdfsAdmin.getKeyProvider();
    }

    @Override
@@ -1527,7 +1531,7 @@ public HadoopShims.HdfsEncryptionShim createHdfsEncryptionShim(FileSystem fs, Co
    if (isHdfsEncryptionSupported()) {
      URI uri = fs.getUri();
      if ("hdfs".equals(uri.getScheme()) && fs instanceof DistributedFileSystem) {
-        return new HdfsEncryptionShim(uri, conf);
+        return HdfsEncryptionShim.createInstance(uri, conf);
      }
    }

@@ -1598,7 +1602,6 @@ public UserGroupInformation cloneUgi(UserGroupInformation baseUgi) throws IOExce
    }
    try {
      Subject origSubject = (Subject) getSubjectMethod.invoke(baseUgi);
-
      Subject subject = new Subject(false, origSubject.getPrincipals(),
          cloneCredentials(origSubject.getPublicCredentials()),
          cloneCredentials(origSubject.getPrivateCredentials()));
@@ -1616,7 +1619,7 @@ private static Set<Object> cloneCredentials(Set<Object> old) {
    }
    return set;
  }
-
+
  private static Boolean hdfsErasureCodingSupport;

  /**
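
The MiniMrShim / MiniTezLocalShim / MiniTezShim changes above combine two fixes: the nested shims become static (a non-static inner class keeps an implicit reference to its enclosing instance, which SpotBugs flags, likely as SIC_INNER_SHOULD_BE_STATIC), and the incoming Configuration is copied rather than aliased. A compressed sketch with hypothetical names (OuterShims/MiniShimSketch are not from the commit):

import org.apache.hadoop.conf.Configuration;

public class OuterShims {
  // "static" drops the hidden OuterShims.this reference the old inner class carried.
  public static class MiniShimSketch {
    private final Configuration conf;

    public MiniShimSketch(Configuration conf) {
      // Copying decouples the shim from later mutations of the caller's object.
      this.conf = new Configuration(conf);
    }
  }
}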

shims/0.23/src/main/java/org/apache/hadoop/mapred/WebHCatJTShim23.java

Lines changed: 16 additions & 11 deletions
@@ -51,20 +51,25 @@ public class WebHCatJTShim23 implements WebHCatJTShim {
  /**
   * Create a connection to the Job Tracker.
   */
-  public WebHCatJTShim23(final Configuration conf, final UserGroupInformation ugi)
-          throws IOException {
+  public static WebHCatJTShim createInstance(final Configuration conf, final UserGroupInformation ugi)
+          throws IOException {
+    JobClient jc;
    try {
-      this.conf = conf;
-      jc = ugi.doAs(new PrivilegedExceptionAction<JobClient>() {
-        public JobClient run() throws IOException, InterruptedException {
-          //create this in doAs() so that it gets a security context based passed in 'ugi'
-          return new JobClient(conf);
-        }
-      });
-    }
-    catch(InterruptedException ex) {
+      jc = ugi.doAs(new PrivilegedExceptionAction<JobClient>() {
+        public JobClient run() throws IOException, InterruptedException {
+          //create this in doAs() so that it gets a security context based passed in 'ugi'
+          return new JobClient(conf);
+        }
+      });
+    } catch (InterruptedException ex) {
      throw new RuntimeException("Failed to create JobClient", ex);
    }
+    return new WebHCatJTShim23(conf, jc);
+  }
+
+  private WebHCatJTShim23(final Configuration conf, final JobClient jc) {
+    this.conf = new Configuration(conf);
+    this.jc = jc;
  }

  /**
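
With the constructor now private, callers go through the factory, as Hadoop23Shims.getWebHCatShim does above: the JobClient is built under ugi.doAs() before the shim object exists, so a failure cannot publish a partially constructed instance. A hypothetical call site, assuming the WebHCatJTShim interface is the one nested in org.apache.hadoop.hive.shims.HadoopShims:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.shims.HadoopShims.WebHCatJTShim;
import org.apache.hadoop.mapred.WebHCatJTShim23;
import org.apache.hadoop.security.UserGroupInformation;

final class WebHCatShimUsage {
  static WebHCatJTShim open(Configuration conf, UserGroupInformation ugi) throws IOException {
    // One factory call replaces "new WebHCatJTShim23(conf, ugi)".
    return WebHCatJTShim23.createInstance(conf, ugi);
  }
}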

shims/common/src/main/java/org/apache/hadoop/fs/ProxyFileSystem.java

Lines changed: 2 additions & 10 deletions
@@ -68,14 +68,6 @@ protected FileStatus swizzleFileStatus(FileStatus orig, boolean isParam) {
    return ret;
  }

-  public ProxyFileSystem() {
-    throw new RuntimeException ("Unsupported constructor");
-  }
-
-  public ProxyFileSystem(FileSystem fs) {
-    throw new RuntimeException ("Unsupported constructor");
-  }
-
  /**
   *
   * @param p
@@ -207,9 +199,9 @@ public FileStatus[] listStatus(Path f) throws IOException {

  @Override //ref. HADOOP-12502
  public RemoteIterator<FileStatus> listStatusIterator(Path f) throws IOException {
+    final RemoteIterator<FileStatus> remoteIterator = ProxyFileSystem.super.listStatusIterator(swizzleParamPath(f));
    return new RemoteIterator<FileStatus>() {
-      private final RemoteIterator<FileStatus> orig =
-          ProxyFileSystem.super.listStatusIterator(swizzleParamPath(f));
+      private final RemoteIterator<FileStatus> orig = remoteIterator;

      @Override
      public boolean hasNext() throws IOException {
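
The listStatusIterator change hoists the fallible super call out of the anonymous class's field initializer into a final local. Field initializers run inside the class's (implicit) constructor, so the old code had a constructor that could throw, which recent SpotBugs versions flag (presumably as CT_CONSTRUCTOR_THROW). A generic sketch of the same move, using plain JDK types:

import java.util.Iterator;
import java.util.List;

final class HoistSketch {
  static Iterator<String> wrap(List<String> source) {
    // Do the work that might fail before allocating the wrapper...
    final Iterator<String> inner = source.iterator();
    return new Iterator<String>() {
      // ...so this initializer, which runs in the implicit constructor, cannot throw.
      private final Iterator<String> delegate = inner;

      @Override
      public boolean hasNext() {
        return delegate.hasNext();
      }

      @Override
      public String next() {
        return delegate.next();
      }
    };
  }
}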

shims/common/src/main/java/org/apache/hadoop/fs/ProxyLocalFileSystem.java

Lines changed: 2 additions & 5 deletions
@@ -48,10 +48,6 @@ public ProxyLocalFileSystem() {
    localFs = new LocalFileSystem();
  }

-  public ProxyLocalFileSystem(FileSystem fs) {
-    throw new RuntimeException("Unsupported Constructor");
-  }
-
  @Override
  public void initialize(URI name, Configuration conf) throws IOException {
    // create a proxy for the local filesystem
@@ -120,7 +116,8 @@ public static class PFileChecksum extends FileChecksum {
    private String algorithmName;

    public PFileChecksum(MD5Hash md5, String algorithmName) {
-      this.md5 = md5;
+      this.md5 = new MD5Hash();
+      this.md5.set(md5);
      this.algorithmName = algorithmName;
    }

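PFileChecksum previously stored the caller's MD5Hash directly; since MD5Hash is mutable, SpotBugs reports EI_EXPOSE_REP2. The fix copies the digest via MD5Hash.set, which the sketch below isolates (ChecksumSketch is a hypothetical name):

import org.apache.hadoop.io.MD5Hash;

final class ChecksumSketch {
  private final MD5Hash md5;

  ChecksumSketch(MD5Hash source) {
    // A fresh MD5Hash plus set() copies the digest bytes, so later mutation
    // of the caller's object cannot reach this field (EI_EXPOSE_REP2 fix).
    this.md5 = new MD5Hash();
    this.md5.set(source);
  }
}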

shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java

Lines changed: 8 additions & 2 deletions
@@ -39,6 +39,7 @@
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;

+import org.apache.hadoop.util.SuppressFBWarnings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -194,9 +195,8 @@ public static class HadoopFileStatus {
    private final FileStatus fileStatus;
    private final AclStatus aclStatus;

-    public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException {
+    private HadoopFileStatus(Configuration conf, FileSystem fs, FileStatus fileStatus, Path file) {

-      FileStatus fileStatus = fs.getFileStatus(file);
      AclStatus aclStatus = null;
      if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) {
        //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless.
@@ -211,6 +211,12 @@ public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOE
      this.aclStatus = aclStatus;
    }

+    public static HadoopFileStatus createInstance(Configuration conf, FileSystem fs, Path file) throws IOException {
+      FileStatus fileStatus = fs.getFileStatus(file);
+      return new HadoopFileStatus(conf, fs, fileStatus, file);
+    }
+
+    @SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "intended")
    public FileStatus getFileStatus() {
      return fileStatus;
    }
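
The three call sites changed earlier in this commit (FileOutputCommitterContainer, TestHadoopFileStatus, SessionHiveMetaStoreClient) migrate mechanically: the fs.getFileStatus() lookup that used to run inside the constructor now runs in the factory, and the private constructor merely records its result. A hypothetical caller showing the new shape:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.io.HdfsUtils;

final class HadoopFileStatusUsage {
  static FileStatus lookup(Configuration conf, FileSystem fs, Path path) throws IOException {
    // Before: new HdfsUtils.HadoopFileStatus(conf, fs, path)
    HdfsUtils.HadoopFileStatus status = HdfsUtils.HadoopFileStatus.createInstance(conf, fs, path);
    return status.getFileStatus();
  }
}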

shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java

Lines changed: 22 additions & 22 deletions
@@ -71,7 +71,7 @@ public InputSplitShim() {
    }

    public InputSplitShim(JobConf conf, Path[] paths, long[] startOffsets,
-        long[] lengths, String[] locations) throws IOException {
+        long[] lengths, String[] locations) {
      super(conf, paths, startOffsets, lengths, dedup(locations));
      _isShrinked = false;
    }
@@ -185,10 +185,25 @@ public float getProgress() throws IOException {
     * A generic RecordReader that can hand out different recordReaders
     * for each chunk in the CombineFileSplit.
     */
-    public CombineFileRecordReader(JobConf job, CombineFileSplit split,
-        Reporter reporter,
-        Class<RecordReader<K, V>> rrClass)
-        throws IOException {
+    public static <K, V> CombineFileRecordReader<K, V> createInstance(JobConf job, CombineFileSplit split,
+        Reporter reporter, Class<RecordReader<K, V>> rrClass) throws IOException {
+      CombineFileRecordReader<K, V> cfrr = new CombineFileRecordReader<>(job, split, reporter);
+      assert (split instanceof InputSplitShim);
+      if (((InputSplitShim) split).isShrinked()) {
+        cfrr.isShrinked = true;
+        cfrr.shrinkedLength = ((InputSplitShim) split).getShrinkedLength();
+      }
+      try {
+        cfrr.rrConstructor = rrClass.getDeclaredConstructor(constructorSignature);
+        cfrr.rrConstructor.setAccessible(true);
+      } catch (Exception e) {
+        throw new RuntimeException(rrClass.getName() + " does not have valid constructor", e);
+      }
+      cfrr.initNextRecordReader(null);
+      return cfrr;
+    }
+
+    private CombineFileRecordReader(JobConf job, CombineFileSplit split, Reporter reporter) {
      this.split = split;
      this.jc = job;
      this.reporter = reporter;
@@ -197,21 +212,6 @@ public CombineFileRecordReader(JobConf job, CombineFileSplit split,
      this.progress = 0;

      isShrinked = false;
-
-      assert (split instanceof InputSplitShim);
-      if (((InputSplitShim) split).isShrinked()) {
-        isShrinked = true;
-        shrinkedLength = ((InputSplitShim) split).getShrinkedLength();
-      }
-
-      try {
-        rrConstructor = rrClass.getDeclaredConstructor(constructorSignature);
-        rrConstructor.setAccessible(true);
-      } catch (Exception e) {
-        throw new RuntimeException(rrClass.getName() +
-            " does not have valid constructor", e);
-      }
-      initNextRecordReader(null);
    }

    /**
@@ -339,7 +339,7 @@ public RecordReader getRecordReader(JobConf job, CombineFileSplit split,
        Class<RecordReader<K, V>> rrClass)
        throws IOException {
      CombineFileSplit cfSplit = split;
-      return new CombineFileRecordReader(job, cfSplit, reporter, rrClass);
+      return CombineFileRecordReader.createInstance(job, cfSplit, reporter, rrClass);
    }
  }

@@ -377,7 +377,7 @@ abstract public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptCon
  @Override
  abstract public FileSystem getNonCachedFileSystem(URI uri, Configuration conf) throws IOException;

-  private static String[] dedup(String[] locations) throws IOException {
+  private static String[] dedup(String[] locations) {
    Set<String> dedup = new HashSet<String>();
    Collections.addAll(dedup, locations);
    return dedup.toArray(new String[dedup.size()]);