Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -555,6 +555,7 @@ public int createDynamicBucket(int bucketNum) {
protected BitSet filesCreatedPerBucket = new BitSet();

protected boolean isCompactionTable = false;
protected boolean isMmTable = false;

private void initializeSpecPath() {
// For a query of the type:
Expand Down Expand Up @@ -625,6 +626,7 @@ protected void initializeOp(Configuration hconf) throws HiveException {
multiFileSpray = conf.isMultiFileSpray();
this.isBucketed = hconf.getInt(hive_metastoreConstants.BUCKET_COUNT, 0) > 0;
this.isCompactionTable = conf.isCompactionTable();
this.isMmTable = conf.isMmTable();
totalFiles = conf.getTotalFiles();
numFiles = conf.getNumFiles();
dpCtx = conf.getDynPartCtx();
Expand Down Expand Up @@ -1189,7 +1191,7 @@ public void process(Object row, int tag) throws HiveException {
// for a given operator branch prediction should work quite nicely on it.
// RecordUpdater expects to get the actual row, not a serialized version of it. Thus we
// pass the row rather than recordValue.
if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || conf.isMmTable() || isCompactionTable) {
if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || isMmTable || isCompactionTable) {
writerOffset = bucketId;
if (!isCompactionTable) {
writerOffset = findWriterOffset(row);
Expand Down Expand Up @@ -1274,7 +1276,7 @@ private void closeRecordwriters(boolean abort) {
protected boolean areAllTrue(boolean[] statsFromRW) {
// If we are doing an acid operation they will always all be true as RecordUpdaters always
// collect stats
if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID && !conf.isMmTable() && !isCompactionTable) {
if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID && !isMmTable && !isCompactionTable) {
return true;
}
for(boolean b : statsFromRW) {
Expand Down
13 changes: 9 additions & 4 deletions ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,6 @@
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.conf.Configuration;
Expand Down Expand Up @@ -483,7 +482,8 @@ private BucketMetaData(int bucketId, int copyNumber) {
* @return true, if the tblProperties contains {@link AcidUtils#COMPACTOR_TABLE_PROPERTY}
*/
public static boolean isCompactionTable(Properties tblProperties) {
return tblProperties != null && isCompactionTable(Maps.fromProperties(tblProperties));
return tblProperties != null &&
StringUtils.isNotBlank((String) tblProperties.get(COMPACTOR_TABLE_PROPERTY));
}

/**
Expand Down Expand Up @@ -1948,7 +1948,11 @@ private static boolean isDirUsable(Path child, long visibilityTxnId, List<Path>
}

public static boolean isTablePropertyTransactional(Properties props) {
return isTablePropertyTransactional(Maps.fromProperties(props));
String resultStr = (String) props.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
if (resultStr == null) {
resultStr = (String) props.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase());
}
return Boolean.parseBoolean(resultStr);
}

public static boolean isTablePropertyTransactional(Map<String, String> parameters) {
Expand Down Expand Up @@ -2205,7 +2209,8 @@ public static boolean isInsertOnlyTable(Table table) {
}

public static boolean isInsertOnlyTable(Properties params) {
return isInsertOnlyTable(Maps.fromProperties(params));
String transactionalProp = (String) params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
return INSERTONLY_TRANSACTIONAL_PROPERTY.equalsIgnoreCase(transactionalProp);
}

/**
Expand Down
4 changes: 1 addition & 3 deletions ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
import java.util.Objects;
import java.util.Set;

import com.google.common.collect.Maps;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
Expand Down Expand Up @@ -375,8 +374,7 @@ public boolean isIcebergTable() {
if (getTable() != null) {
return DDLUtils.isIcebergTable(table);
} else {
return MetaStoreUtils.isIcebergTable(
Maps.fromProperties(getTableInfo().getProperties()));
return MetaStoreUtils.isIcebergTable(getTableInfo().getProperties());
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -302,6 +302,10 @@ public static boolean isIcebergTable(Map<String, String> params) {
return HiveMetaHook.ICEBERG.equalsIgnoreCase(params.get(HiveMetaHook.TABLE_TYPE));
}

public static boolean isIcebergTable(Properties params) {
return HiveMetaHook.ICEBERG.equalsIgnoreCase((String) params.get(HiveMetaHook.TABLE_TYPE));
}

public static boolean isTranslatedToExternalTable(Table table) {
Map<String, String> params = table.getParameters();
return params != null && MetaStoreUtils.isPropertyTrue(params, HiveMetaHook.EXTERNAL)
Expand Down