Modifier and Type | Method and Description |
---|---|
CommitMessage | UnawareAppendCompactionTask.doCompact(FileStoreTable table, AppendOnlyFileStoreWrite write) |
Constructor and Description |
---|
UnawareAppendTableCompactionCoordinator(FileStoreTable table) |
UnawareAppendTableCompactionCoordinator(FileStoreTable table, boolean isStreaming) |
UnawareAppendTableCompactionCoordinator(FileStoreTable table, boolean isStreaming, Predicate filter) |
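A hedged sketch of how these overloads differ (imports omitted; `table` and `partitionFilter` are hypothetical, pre-built instances); only the constructors listed above are used:

```java
// Streaming mode: the coordinator keeps scanning for newly written files to compact.
UnawareAppendTableCompactionCoordinator streamingCoordinator =
        new UnawareAppendTableCompactionCoordinator(table, true);

// Batch mode, restricted to partitions matching a Paimon Predicate.
UnawareAppendTableCompactionCoordinator filteredCoordinator =
        new UnawareAppendTableCompactionCoordinator(table, false, partitionFilter);
```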
Modifier and Type | Field and Description |
---|---|
protected FileStoreTable | SyncTableActionBase.fileStoreTable |
Modifier and Type | Field and Description |
---|---|
protected List<FileStoreTable> | SyncDatabaseActionBase.tables |
Modifier and Type | Method and Description |
---|---|
protected FileStoreTable | SynchronizationActionBase.alterTableOptions(Identifier identifier, FileStoreTable table) |
FileStoreTable | SyncTableActionBase.fileStoreTable() |
Modifier and Type | Method and Description |
---|---|
protected FileStoreTable | SynchronizationActionBase.alterTableOptions(Identifier identifier, FileStoreTable table) |
Modifier and Type | Method and Description |
---|---|
static List<Path> | PickFilesUtil.getUsedFilesForLatestSnapshot(FileStoreTable table) |
Modifier and Type | Method and Description |
---|---|
void | MultiUnawareBucketTableScan.addScanTable(FileStoreTable fileStoreTable, Identifier identifier) |
void | MultiAwareBucketTableScan.addScanTable(FileStoreTable fileStoreTable, Identifier identifier) |
Constructor and Description |
---|
UnawareBucketCompactionTopoBuilder(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment env, String tableIdentifier, FileStoreTable table) |
UnawareBucketCompactor(FileStoreTable table, String commitUser, java.util.function.Supplier<ExecutorService> lazyCompactExecutor) |
Modifier and Type | Field and Description |
---|---|
FileStoreTable | FullCacheLookupTable.Context.table |
Modifier and Type | Method and Description |
---|---|
static PrimaryKeyPartialLookupTable | PrimaryKeyPartialLookupTable.createLocalTable(FileStoreTable table, int[] projection, File tempPath, List<String> joinKey, Set<Integer> requireCachedBucketIds) |
static PrimaryKeyPartialLookupTable | PrimaryKeyPartialLookupTable.createRemoteTable(FileStoreTable table, int[] projection, List<String> joinKey) |
Constructor and Description |
---|
Context(FileStoreTable table, int[] projection, Predicate tablePredicate, Predicate projectedPredicate, File tempPath, List<String> joinKey, Set<Integer> requiredCachedBucketIds) |
Modifier and Type | Method and Description |
---|---|
static boolean | RemoteTableQuery.isRemoteServiceAvailable(FileStoreTable table) |
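A hedged sketch combining these factories (imports omitted; `table`, `tempPath`, and `requireCachedBucketIds` are hypothetical, pre-built values); only the signatures listed above are used:

```java
int[] projection = new int[] {0, 2};                             // hypothetical projected columns
List<String> joinKey = java.util.Arrays.asList("order_id");     // hypothetical join key

PrimaryKeyPartialLookupTable lookupTable =
        RemoteTableQuery.isRemoteServiceAvailable(table)
                // Query the table's running service instead of reading files locally.
                ? PrimaryKeyPartialLookupTable.createRemoteTable(table, projection, joinKey)
                // Otherwise build a local lookup copy under tempPath, caching the given buckets.
                : PrimaryKeyPartialLookupTable.createLocalTable(
                        table, projection, tempPath, joinKey, requireCachedBucketIds);
```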
Modifier and Type | Field and Description |
---|---|
protected FileStoreTable | UnawareBucketSink.table |
protected FileStoreTable | BatchWriteGeneratorTagOperator.table |
protected FileStoreTable | FlinkSink.table |
protected FileStoreTable | TableWriteOperator.table |
protected FileStoreTable | FlinkSinkBuilder.table |
Modifier and Type | Field and Description |
---|---|
protected Map<Identifier,FileStoreTable> | MultiTablesStoreCompactOperator.tables |
Modifier and Type | Method and Description |
---|---|
StoreSinkWrite | StoreSinkWrite.WithWriteBufferProvider.provide(FileStoreTable table, String commitUser, StoreSinkWriteState state, org.apache.flink.runtime.io.disk.iomanager.IOManager ioManager, MemoryPoolFactory memoryPoolFactory, org.apache.flink.metrics.MetricGroup metricGroup) |
StoreSinkWrite | StoreSinkWrite.Provider.provide(FileStoreTable table, String commitUser, StoreSinkWriteState state, org.apache.flink.runtime.io.disk.iomanager.IOManager ioManager, MemorySegmentPool memoryPool, org.apache.flink.metrics.MetricGroup metricGroup) |
void | StoreSinkWrite.replace(FileStoreTable newTable): Replace the internal TableWriteImpl with the one provided by newWriteProvider. |
void | StoreSinkWriteImpl.replace(FileStoreTable newTable) |
static org.apache.flink.streaming.api.datastream.DataStreamSink<?> | UnawareBucketCompactionSink.sink(FileStoreTable table, org.apache.flink.streaming.api.datastream.DataStream<UnawareAppendCompactionTask> input) |
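Judging by the listing, StoreSinkWrite.Provider declares a single provide method, so a provider can plausibly be written as a lambda deferring to the matching StoreSinkWriteImpl constructor (see the constructor table below); a sketch assuming the boolean flags are effectively final variables captured from the enclosing scope:

```java
// Assumption: Provider is usable as a functional interface; ignorePreviousFiles,
// waitCompaction, and isStreamingMode are supplied by the caller.
StoreSinkWrite.Provider provider =
        (table, commitUser, state, ioManager, memoryPool, metricGroup) ->
                new StoreSinkWriteImpl(
                        table,
                        commitUser,
                        state,
                        ioManager,
                        ignorePreviousFiles,
                        waitCompaction,
                        isStreamingMode,
                        memoryPool,
                        metricGroup);
```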
Constructor and Description |
---|
AppendBypassCompactWorkerOperator(FileStoreTable table, String commitUser) |
AppendCompactWorkerOperator(FileStoreTable table, String commitUser) |
AppendOnlySingleTableCompactionWorkerOperator(FileStoreTable table, String commitUser) |
AsyncLookupSinkWrite(FileStoreTable table, String commitUser, StoreSinkWriteState state, org.apache.flink.runtime.io.disk.iomanager.IOManager ioManager, boolean ignorePreviousFiles, boolean waitCompaction, boolean isStreaming, MemorySegmentPool memoryPool, org.apache.flink.metrics.MetricGroup metricGroup) |
BatchWriteGeneratorTagOperator(CommitterOperator<CommitT,GlobalCommitT> commitOperator, FileStoreTable table) |
CompactorSink(FileStoreTable table) |
CompactorSinkBuilder(FileStoreTable table) |
DynamicBucketCompactSink(FileStoreTable table, Map<String,String> overwritePartition) |
DynamicBucketRowWriteOperator(FileStoreTable table, StoreSinkWrite.Provider storeSinkWriteProvider, String initialCommitUser) |
DynamicBucketSink(FileStoreTable table, Map<String,String> overwritePartition) |
FileIndexProcessor(FileStoreTable table) |
FixedBucketSink(FileStoreTable table, Map<String,String> overwritePartition, LogSinkFunction logSinkFunction) |
FlinkSink(FileStoreTable table, boolean ignorePreviousFiles) |
FlinkWriteSink(FileStoreTable table, Map<String,String> overwritePartition) |
GlobalFullCompactionSinkWrite(FileStoreTable table, String commitUser, StoreSinkWriteState state, org.apache.flink.runtime.io.disk.iomanager.IOManager ioManager, boolean ignorePreviousFiles, boolean waitCompaction, int deltaCommits, boolean isStreaming, MemorySegmentPool memoryPool, org.apache.flink.metrics.MetricGroup metricGroup) |
RewriteFileIndexSink(FileStoreTable table) |
RowDataStoreWriteOperator(FileStoreTable table, LogSinkFunction logSinkFunction, StoreSinkWrite.Provider storeSinkWriteProvider, String initialCommitUser) |
RowDynamicBucketSink(FileStoreTable table, Map<String,String> overwritePartition) |
RowUnawareBucketSink(FileStoreTable table, Map<String,String> overwritePartitions, LogSinkFunction logSinkFunction, Integer parallelism) |
StoreCommitter(FileStoreTable table, TableCommit commit, Committer.Context context) |
StoreCompactOperator(FileStoreTable table, StoreSinkWrite.Provider storeSinkWriteProvider, String initialCommitUser) |
StoreSinkWriteImpl(FileStoreTable table, String commitUser, StoreSinkWriteState state, org.apache.flink.runtime.io.disk.iomanager.IOManager ioManager, boolean ignorePreviousFiles, boolean waitCompaction, boolean isStreamingMode, MemoryPoolFactory memoryPoolFactory, org.apache.flink.metrics.MetricGroup metricGroup) |
StoreSinkWriteImpl(FileStoreTable table, String commitUser, StoreSinkWriteState state, org.apache.flink.runtime.io.disk.iomanager.IOManager ioManager, boolean ignorePreviousFiles, boolean waitCompaction, boolean isStreamingMode, MemorySegmentPool memoryPool, org.apache.flink.metrics.MetricGroup metricGroup) |
TableWriteOperator(FileStoreTable table, StoreSinkWrite.Provider storeSinkWriteProvider, String initialCommitUser) |
UnawareBucketCompactionSink(FileStoreTable table) |
UnawareBucketSink(FileStoreTable table, Map<String,String> overwritePartitions, LogSinkFunction logSinkFunction, Integer parallelism) |
Modifier and Type | Method and Description |
---|---|
Map<Identifier,FileStoreTable> | CdcRecordStoreMultiWriteOperator.tables() |
Modifier and Type | Method and Description |
---|---|
FlinkCdcSyncDatabaseSinkBuilder<T> | FlinkCdcSyncDatabaseSinkBuilder.withTables(List<FileStoreTable> tables) |
Constructor and Description |
---|
CdcDynamicBucketSink(FileStoreTable table) |
CdcDynamicBucketSinkBase(FileStoreTable table) |
CdcDynamicBucketWriteOperator(FileStoreTable table, StoreSinkWrite.Provider storeSinkWriteProvider, String initialCommitUser) |
CdcFixedBucketSink(FileStoreTable table) |
CdcRecordStoreWriteOperator(FileStoreTable table, StoreSinkWrite.Provider storeSinkWriteProvider, String initialCommitUser) |
CdcUnawareBucketSink(FileStoreTable table, Integer parallelism) |
CdcUnawareBucketWriteOperator(FileStoreTable table, StoreSinkWrite.Provider storeSinkWriteProvider, String initialCommitUser) |
Constructor and Description |
---|
GlobalDynamicBucketSink(FileStoreTable table, Map<String,String> overwritePartition) |
Modifier and Type | Method and Description |
---|---|
static PartitionMarkDone | PartitionMarkDone.create(boolean isStreaming, boolean isRestored, org.apache.flink.api.common.state.OperatorStateStore stateStore, FileStoreTable table) |
Modifier and Type | Field and Description |
---|---|
protected FileStoreTable | TableSorter.table |
Modifier and Type | Method and Description |
---|---|
static TableSorter | TableSorter.getSorter(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment batchTEnv, org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> origin, FileStoreTable fileStoreTable, TableSortInfo sortInfo) |
static <KEY> org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> | SortUtils.sortStreamByKey(org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> inputStream, FileStoreTable table, RowType sortKeyType, org.apache.flink.api.common.typeinfo.TypeInformation<KEY> keyTypeInformation, SerializableSupplier<Comparator<KEY>> shuffleKeyComparator, org.apache.paimon.flink.sorter.SortUtils.KeyAbstract<KEY> shuffleKeyAbstract, org.apache.paimon.flink.sorter.SortUtils.ShuffleKeyConvertor<KEY> convertor, TableSortInfo tableSortInfo): Sort the input stream by the specified key. |
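A hedged sketch of the dispatch (imports omitted; `env`, `input`, `fileStoreTable`, and `sortInfo` are hypothetical): getSorter selects one of the concrete sorters constructed below based on the TableSortInfo; the sort() call is an assumption, since that method is not part of this listing:

```java
// Pick an order/zorder/hilbert sorter from sortInfo, then run it over the stream.
TableSorter sorter = TableSorter.getSorter(env, input, fileStoreTable, sortInfo);
org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> sorted =
        sorter.sort(); // assumed API, see note above
```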
Constructor and Description |
---|
HilbertSorter(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment batchTEnv, org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> origin, FileStoreTable table, TableSortInfo tableSortInfo) |
OrderSorter(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment batchTEnv, org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> origin, FileStoreTable table, TableSortInfo tableSortInfo) |
TableSorter(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment batchTEnv, org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> origin, FileStoreTable table, List<String> orderColNames) |
ZorderSorter(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment batchTEnv, org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> origin, FileStoreTable table, TableSortInfo sortInfo) |
Constructor and Description |
---|
AppendBypassCoordinateOperator(FileStoreTable table, org.apache.flink.streaming.runtime.tasks.ProcessingTimeService processingTimeService, org.apache.flink.api.common.operators.MailboxExecutor mailbox) |
AppendBypassCoordinateOperatorFactory(FileStoreTable table) |
BucketUnawareCompactSource(FileStoreTable table, boolean isStreaming, long scanInterval, Predicate filter) |
CompactorSourceBuilder(String tableIdentifier, FileStoreTable table) |
RewriteFileIndexSource(FileStoreTable table, Predicate partitionPredicate) |
Modifier and Type | Method and Description |
---|---|
FileStoreTable | PaimonInputSplit.getTable() |
Constructor and Description |
---|
PaimonInputSplit(String path, DataSplit split, FileStoreTable table) |
Modifier and Type | Method and Description |
---|---|
HiveMigrator.MigrateTask | HiveMigrator.importUnPartitionedTableTask(FileIO fileIO, org.apache.hadoop.hive.metastore.api.Table sourceTable, FileStoreTable paimonTable, Map<Path,Path> rollback) |
Constructor and Description |
---|
MigrateTask(FileIO fileIO, String format, String location, FileStoreTable paimonTable, BinaryRow partitionRow, Path newDir, Map<Path,Path> rollback) |
Modifier and Type | Method and Description |
---|---|
static FileStoreTable | HiveUtils.createFileStoreTable(org.apache.hadoop.mapred.JobConf jobConf) |
Modifier and Type | Method and Description |
---|---|
static org.apache.hadoop.mapred.InputSplit[] | HiveSplitGenerator.generateSplits(FileStoreTable table, org.apache.hadoop.mapred.JobConf jobConf) |
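These two static helpers compose naturally inside a Hive InputFormat; a minimal sketch, assuming `jobConf` is an org.apache.hadoop.mapred.JobConf already populated with the Paimon table's location and options:

```java
// Rebuild the Paimon table from the Hive job configuration, then plan input splits.
FileStoreTable table = HiveUtils.createFileStoreTable(jobConf);
org.apache.hadoop.mapred.InputSplit[] splits =
        HiveSplitGenerator.generateSplits(table, jobConf);
```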
Modifier and Type | Field and Description |
---|---|
protected FileStoreTable | AbstractIcebergCommitCallback.table |
Constructor and Description |
---|
AbstractIcebergCommitCallback(FileStoreTable table, String commitUser) |
AppendOnlyIcebergCommitCallback(FileStoreTable table, String commitUser) |
PrimaryKeyIcebergCommitCallback(FileStoreTable table, String commitUser) |
Modifier and Type | Method and Description |
---|---|
static IcebergManifestList | IcebergManifestList.create(FileStoreTable table, IcebergPathFactory pathFactory) |
static IcebergManifestFile | IcebergManifestFile.create(FileStoreTable table, IcebergPathFactory pathFactory) |
Constructor and Description |
---|
OrphanFilesClean(FileStoreTable table) |
Modifier and Type | Method and Description |
---|---|
static List<PartitionMarkDoneAction> | PartitionMarkDoneAction.createActions(FileStoreTable fileStoreTable, CoreOptions options) |
static MetastoreClient | PartitionMarkDoneAction.createMetastoreClient(FileStoreTable table, CoreOptions options) |
Modifier and Type | Class and Description |
---|---|
class | PrivilegedFileStoreTable: FileStoreTable with privilege checks. |
Modifier and Type | Method and Description |
---|---|
FileStoreTable | PrivilegedFileStoreTable.copy(Map<String,String> dynamicOptions) |
FileStoreTable | PrivilegedFileStoreTable.copy(TableSchema newTableSchema) |
FileStoreTable | PrivilegedFileStoreTable.copyWithLatestSchema() |
FileStoreTable | PrivilegedFileStoreTable.copyWithoutTimeTravel(Map<String,String> dynamicOptions) |
FileStoreTable | PrivilegedFileStoreTable.switchToBranch(String branchName) |
Constructor and Description |
---|
PrivilegedFileStoreTable(FileStoreTable wrapped, PrivilegeChecker privilegeChecker, Identifier identifier) |
Modifier and Type | Field and Description |
---|---|
protected FileStoreTable | TableSorter.table |
Modifier and Type | Method and Description |
---|---|
static TableSorter | TableSorter.getSorter(FileStoreTable table, TableSorter.OrderType orderType, List<String> orderColumns) |
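A hedged one-liner (imports omitted; `table` is hypothetical, and ZORDER is assumed to be one of the TableSorter.OrderType constants, matching the ZorderSorter constructor below):

```java
TableSorter zorderSorter =
        TableSorter.getSorter(
                table,
                TableSorter.OrderType.ZORDER,              // assumed enum constant
                java.util.Arrays.asList("col1", "col2")); // hypothetical sort columns
```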
Constructor and Description |
---|
HilbertSorter(FileStoreTable table, List<String> orderColumns) |
OrderSorter(FileStoreTable table, List<String> orderColNames) |
TableSorter(FileStoreTable table, List<String> orderColNames) |
ZorderSorter(FileStoreTable table, List<String> zOrderColNames) |
Modifier and Type | Class and Description |
---|---|
class | DelegatedFileStoreTable: Delegated FileStoreTable. |
class | FallbackReadFileStoreTable: A FileStoreTable which mainly reads from the current branch. |
Modifier and Type | Field and Description |
---|---|
protected FileStoreTable | DelegatedFileStoreTable.wrapped |
Modifier and Type | Method and Description |
---|---|
FileStoreTable | FileStoreTable.copy(Map<String,String> dynamicOptions) |
FileStoreTable | FallbackReadFileStoreTable.copy(Map<String,String> dynamicOptions) |
FileStoreTable | FileStoreTable.copy(TableSchema newTableSchema) |
FileStoreTable | FallbackReadFileStoreTable.copy(TableSchema newTableSchema) |
FileStoreTable | FileStoreTable.copyWithLatestSchema(): TODO: this method is weird; old options will overwrite new options. |
FileStoreTable | FallbackReadFileStoreTable.copyWithLatestSchema() |
FileStoreTable | FileStoreTable.copyWithoutTimeTravel(Map<String,String> dynamicOptions): Does not change the table schema even when time-travel scan options are present. |
FileStoreTable | FallbackReadFileStoreTable.copyWithoutTimeTravel(Map<String,String> dynamicOptions) |
static FileStoreTable | FileStoreTableFactory.create(CatalogContext context) |
static FileStoreTable | FileStoreTableFactory.create(FileIO fileIO, Options options) |
static FileStoreTable | FileStoreTableFactory.create(FileIO fileIO, Path path) |
static FileStoreTable | FileStoreTableFactory.create(FileIO fileIO, Path tablePath, TableSchema tableSchema) |
static FileStoreTable | FileStoreTableFactory.create(FileIO fileIO, Path tablePath, TableSchema tableSchema, CatalogEnvironment catalogEnvironment) |
static FileStoreTable | FileStoreTableFactory.create(FileIO fileIO, Path tablePath, TableSchema tableSchema, Options dynamicOptions, CatalogEnvironment catalogEnvironment) |
FileStoreTable | FileStoreTable.switchToBranch(String branchName): Get the DataTable with the branch identified by branchName. |
FileStoreTable | FallbackReadFileStoreTable.switchToBranch(String branchName) |
Constructor and Description |
---|
DelegatedFileStoreTable(FileStoreTable wrapped) |
FallbackReadFileStoreTable(FileStoreTable main, FileStoreTable fallback) |
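A hedged sketch tying the factory to the wrappers above (imports omitted; `fileIO` and `tablePath` are hypothetical, pre-built handles, and the option key and branch name are illustrative); only signatures from this listing are used:

```java
// Load the table from its path, re-key dynamic options, and read a branch.
FileStoreTable table = FileStoreTableFactory.create(fileIO, tablePath);
FileStoreTable tuned =
        table.copy(java.util.Collections.singletonMap("scan.parallelism", "4")); // illustrative option
FileStoreTable devBranch = tuned.switchToBranch("dev"); // hypothetical branch name

// Combine a main table with a fallback table for reads (constructor listed above).
FileStoreTable withFallback = new FallbackReadFileStoreTable(table, devBranch);
```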
Constructor and Description |
---|
LocalTableQuery(FileStoreTable table) |
Modifier and Type | Field and Description |
---|---|
static Map<String,java.util.function.Function<FileStoreTable,Table>> | SystemTableLoader.SYSTEM_TABLE_LOADERS |
Modifier and Type | Method and Description |
---|---|
static Table | SystemTableLoader.load(String type, FileStoreTable dataTable) |
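A minimal sketch, assuming "snapshots" is one of the keys registered in SYSTEM_TABLE_LOADERS (imports omitted; `dataTable` is a hypothetical FileStoreTable):

```java
// Look up the loader by system-table name and apply it to the data table.
Table snapshots = SystemTableLoader.load("snapshots", dataTable); // "snapshots" assumed registered
```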
Modifier and Type | Method and Description |
---|---|
Map<String,String> | TagPreview.timeTravel(FileStoreTable table, String tag) |