diff --git a/ql/src/test/queries/clientpositive/orc_diff_part_cols.q b/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
index d1c615d44dd8..33ffc62812e9 100644
--- a/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
+++ b/ql/src/test/queries/clientpositive/orc_diff_part_cols.q
@@ -14,6 +14,6 @@ INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src LIMIT
 
 ALTER TABLE test_orc ADD COLUMNS (cnt INT);
 
-INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5;
+INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key ORDER BY key LIMIT 5;
 
 SELECT * FROM test_orc;
diff --git a/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out b/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out
index 543b67b8dc21..5a05757814c2 100644
--- a/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out
+++ b/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out
@@ -37,11 +37,11 @@ POSTHOOK: type: ALTERTABLE_ADDCOLS
 POSTHOOK: Input: default@test_orc
 POSTHOOK: Output: default@test_orc
 POSTHOOK: Lineage: test_orc PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key ORDER BY key LIMIT 5
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 PREHOOK: Output: default@test_orc@part=2
-POSTHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key LIMIT 5
+POSTHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '2') SELECT key, count(*) FROM src GROUP BY key ORDER BY key LIMIT 5
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@test_orc@part=2
diff --git a/service/src/java/org/apache/hive/service/auth/TUGIContainingProcessor.java b/service/src/java/org/apache/hive/service/auth/TUGIContainingProcessor.java
index 31158ff539a7..4118211e779e 100644
--- a/service/src/java/org/apache/hive/service/auth/TUGIContainingProcessor.java
+++ b/service/src/java/org/apache/hive/service/auth/TUGIContainingProcessor.java
@@ -3,7 +3,6 @@
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
-import java.util.HashSet;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -21,27 +20,6 @@ public class TUGIContainingProcessor implements TProcessor{
   private final HadoopShims shim;
   private final boolean isFsCacheDisabled;
 
-  // Keep all session UGIs to close their filesystems at Statement.close() and Connection.close()
-  // it's done to prevent LeaseChecker thread leak and to clean FileSystem.CACHE
-  private static final ThreadLocal<HashSet<UserGroupInformation>> ugis = new ThreadLocal<HashSet<UserGroupInformation>>() {
-    @Override
-    protected HashSet<UserGroupInformation> initialValue() {
-      return new HashSet<UserGroupInformation>();
-    }
-  };
-
-  /**
-   * Close FileSystem for session UGIs
-   */
-  public static void closeAllFsForUGIs() {
-    HashSet<UserGroupInformation> ugisSet = ugis.get();
-    HadoopShims sh = ShimLoader.getHadoopShims();
-    for (UserGroupInformation ugi : ugisSet) {
-      sh.closeAllForUGI(ugi);
-    }
-    ugisSet.clear();
-  }
-
   public TUGIContainingProcessor(TProcessor wrapped, Configuration conf) {
     this.wrapped = wrapped;
     this.isFsCacheDisabled = conf.getBoolean(String.format("fs.%s.impl.disable.cache",
@@ -56,8 +34,6 @@ public boolean process(final TProtocol in, final TProtocol out) throws TExceptio
     try {
       clientUgi = shim.createRemoteUser(((TSaslServerTransport)in.getTransport()).
           getSaslServer().getAuthorizationID(), new ArrayList<String>());
-      // remember all UGIs created during the session
-      ugis.get().add(clientUgi);
       return shim.doAs(clientUgi, new PrivilegedExceptionAction<Boolean>() {
         public Boolean run() {
           try {
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index b8853acf7260..7254491672b1 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -18,6 +18,7 @@
 
 package org.apache.hive.service.cli.session;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -25,6 +26,7 @@
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -35,7 +37,6 @@
 import org.apache.hadoop.hive.ql.history.HiveHistory;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hive.common.util.HiveVersionInfo;
-import org.apache.hive.service.auth.TUGIContainingProcessor;
 import org.apache.hive.service.cli.FetchOrientation;
 import org.apache.hive.service.cli.GetInfoType;
 import org.apache.hive.service.cli.GetInfoValue;
@@ -310,8 +311,6 @@ public void close() throws HiveSQLException {
         hiveHist.closeStream();
       }
       sessionState.close();
-      // close all FileSystem for all UGIs created during this Connection live time
-      TUGIContainingProcessor.closeAllFsForUGIs();
       release();
     } catch (IOException ioe) {
       release();
@@ -346,8 +345,6 @@ public void closeOperation(OperationHandle opHandle) throws HiveSQLException {
     try {
       operationManager.closeOperation(opHandle);
       opHandleSet.remove(opHandle);
-      // close all FileSystem for all UGIs created during this Statement live time
-      TUGIContainingProcessor.closeAllFsForUGIs();
     } finally {
       release();
     }