public abstract class BaseDataTableSource extends FlinkTableSource implements org.apache.flink.table.connector.source.LookupTableSource, org.apache.flink.table.connector.source.abilities.SupportsWatermarkPushDown, org.apache.flink.table.connector.source.abilities.SupportsAggregatePushDown, SupportsLookupCustomShuffle
Table source to create StaticFileStoreSource or ContinuousFileStoreSource under batch mode or streaming mode.

Nested classes/interfaces inherited from class FlinkTableSource: FlinkTableSource.SplitStatistics

Nested classes/interfaces inherited from interface org.apache.flink.table.connector.source.LookupTableSource: org.apache.flink.table.connector.source.LookupTableSource.LookupContext, org.apache.flink.table.connector.source.LookupTableSource.LookupRuntimeProvider

Nested classes/interfaces inherited from interface org.apache.flink.table.connector.source.DynamicTableSource: org.apache.flink.table.connector.source.DynamicTableSource.Context, org.apache.flink.table.connector.source.DynamicTableSource.DataStructureConverter

Nested classes/interfaces inherited from interface SupportsLookupCustomShuffle: SupportsLookupCustomShuffle.InputDataPartitioner
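For context on the class description above, a minimal sketch of how the execution mode selects between the two sources from the SQL side; the catalog name, warehouse path, and table name ("orders") are hypothetical:

```java
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class PaimonSourceModeSketch {
    public static void main(String[] args) {
        // Streaming mode: scans on this source are unbounded and served by a
        // ContinuousFileStoreSource; EnvironmentSettings.inBatchMode() would
        // instead yield a bounded StaticFileStoreSource.
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.inStreamingMode());

        // Hypothetical warehouse path and table name.
        tEnv.executeSql(
                "CREATE CATALOG paimon WITH ("
                        + " 'type' = 'paimon',"
                        + " 'warehouse' = 'file:/tmp/paimon-warehouse')");
        tEnv.executeSql("USE CATALOG paimon");
        tEnv.executeSql("SELECT * FROM orders").print();
    }
}
```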
Modifier and Type | Field and Description
---|---
protected org.apache.flink.table.factories.DynamicTableFactory.Context | context
protected Long | countPushed
protected LogStoreTableFactory | logStoreTableFactory
protected org.apache.flink.table.catalog.ObjectIdentifier | tableIdentifier
protected boolean | unbounded
protected org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> | watermarkStrategy
Fields inherited from class FlinkTableSource: FLINK_INFER_SCAN_PARALLELISM, limit, predicate, projectFields, splitStatistics, table
Constructor and Description
---
BaseDataTableSource(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier, Table table, boolean unbounded, org.apache.flink.table.factories.DynamicTableFactory.Context context, LogStoreTableFactory logStoreTableFactory, Predicate predicate, int[][] projectFields, Long limit, org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy, Long countPushed)
Modifier and Type | Method and Description
---|---
boolean | applyAggregates(List<int[]> groupingSets, List<org.apache.flink.table.expressions.AggregateExpression> aggregateExpressions, org.apache.flink.table.types.DataType producedDataType)
void | applyWatermark(org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy)
String | asSummaryString()
protected abstract List<String> | dynamicPartitionFilteringFields()
org.apache.flink.table.connector.ChangelogMode | getChangelogMode()
protected FileStoreLookupFunction | getFileStoreLookupFunction(org.apache.flink.table.connector.source.LookupTableSource.LookupContext context, FileStoreTable table, int[] projection, int[] joinKey)
org.apache.flink.table.connector.source.LookupTableSource.LookupRuntimeProvider | getLookupRuntimeProvider(org.apache.flink.table.connector.source.LookupTableSource.LookupContext context)
Optional<SupportsLookupCustomShuffle.InputDataPartitioner> | getPartitioner() - Retrieves a custom partitioner that is applied to the input stream of the lookup-join node.
org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider | getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext)
boolean | isUnbounded()
Methods inherited from class FlinkTableSource: applyFilters, applyLimit, applyProjection, getPredicateWithScanPartitions, getTable, inferSourceParallelism, scanSplitsForInference, supportsNestedProjection
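The inherited hooks above receive the planner's push-downs. As a hedged illustration, assuming a Paimon table "orders" with a partition column "dt" (tEnv is a TableEnvironment as in the earlier sketch), each clause below is a candidate for the corresponding hook:

```java
// Projection, filter, and limit are each candidates for push-down into the
// source via applyProjection, applyFilters, and applyLimit respectively.
tEnv.executeSql(
        String.join("\n",
                "SELECT order_id          -- applyProjection",
                "FROM orders",
                "WHERE dt = '2024-01-01'  -- applyFilters",
                "LIMIT 10                 -- applyLimit"))
        .print();
```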
protected final org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier
protected final boolean unbounded
protected final org.apache.flink.table.factories.DynamicTableFactory.Context context
@Nullable protected final LogStoreTableFactory logStoreTableFactory
@Nullable protected org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy
public BaseDataTableSource(org.apache.flink.table.catalog.ObjectIdentifier tableIdentifier, Table table, boolean unbounded, org.apache.flink.table.factories.DynamicTableFactory.Context context, @Nullable LogStoreTableFactory logStoreTableFactory, @Nullable Predicate predicate, @Nullable int[][] projectFields, @Nullable Long limit, @Nullable org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy, @Nullable Long countPushed)
public org.apache.flink.table.connector.ChangelogMode getChangelogMode()
Specified by: getChangelogMode in interface org.apache.flink.table.connector.source.ScanTableSource
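Which mode this source reports depends on the underlying table; as a rough, hedged guide, an append-only table can be read as insert-only, while a primary-key table may produce a full changelog. Flink's built-in constants, for reference:

```java
import org.apache.flink.table.connector.ChangelogMode;

// Flink's built-in changelog modes; which one this source reports is decided
// by the table's type and options, not by user code.
ChangelogMode insertOnly = ChangelogMode.insertOnly(); // +I only
ChangelogMode upsert = ChangelogMode.upsert();         // +I, +U, -D
ChangelogMode all = ChangelogMode.all();               // +I, -U, +U, -D
```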
public org.apache.flink.table.connector.source.ScanTableSource.ScanRuntimeProvider getScanRuntimeProvider(org.apache.flink.table.connector.source.ScanTableSource.ScanContext scanContext)
Specified by: getScanRuntimeProvider in interface org.apache.flink.table.connector.source.ScanTableSource
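In streaming mode the provider builds a continuous scan. A hedged sketch using Paimon's scan.mode table option; the table name is hypothetical, and dynamic table options must be enabled in the session:

```java
// Start the continuous scan from the latest snapshot rather than a full read.
tEnv.executeSql(
        "SELECT * FROM orders /*+ OPTIONS('scan.mode' = 'latest') */").print();
```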
public void applyWatermark(org.apache.flink.api.common.eventtime.WatermarkStrategy<org.apache.flink.table.data.RowData> watermarkStrategy)
Specified by: applyWatermark in interface org.apache.flink.table.connector.source.abilities.SupportsWatermarkPushDown
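In SQL, the pushed-down strategy originates from a WATERMARK clause in the table's DDL. A sketch of an equivalent strategy, assuming a hypothetical event-time column at index 0 with millisecond precision:

```java
import java.time.Duration;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.table.data.RowData;

// Bounded-out-of-orderness watermarks, reading event time from a hypothetical
// TIMESTAMP(3) column at position 0. The planner, not user code, calls
// applyWatermark(strategy) on the source.
WatermarkStrategy<RowData> strategy =
        WatermarkStrategy.<RowData>forBoundedOutOfOrderness(Duration.ofSeconds(5))
                .withTimestampAssigner(
                        (row, previous) -> row.getTimestamp(0, 3).getMillisecond());
```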
public org.apache.flink.table.connector.source.LookupTableSource.LookupRuntimeProvider getLookupRuntimeProvider(org.apache.flink.table.connector.source.LookupTableSource.LookupContext context)
Specified by: getLookupRuntimeProvider in interface org.apache.flink.table.connector.source.LookupTableSource
protected FileStoreLookupFunction getFileStoreLookupFunction(org.apache.flink.table.connector.source.LookupTableSource.LookupContext context, FileStoreTable table, int[] projection, int[] joinKey)
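A lookup join is what routes a query through getLookupRuntimeProvider and, in turn, the FileStoreLookupFunction. A hedged sketch; "orders", "customers", and the proctime attribute are hypothetical:

```java
// FOR SYSTEM_TIME AS OF over a processing-time attribute marks the join as a
// lookup join, making "customers" the dimension side served by this source.
tEnv.executeSql(
        String.join("\n",
                "SELECT o.order_id, c.name",
                "FROM orders AS o",
                "JOIN customers FOR SYSTEM_TIME AS OF o.proctime AS c",
                "ON o.customer_id = c.id"))
        .print();
```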
public boolean applyAggregates(List<int[]> groupingSets, List<org.apache.flink.table.expressions.AggregateExpression> aggregateExpressions, org.apache.flink.table.types.DataType producedDataType)
Specified by: applyAggregates in interface org.apache.flink.table.connector.source.abilities.SupportsAggregatePushDown
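Given the countPushed field above, a global COUNT(*) is the natural candidate here: when applyAggregates returns true, the scan can answer from table statistics instead of reading data files. A hedged sketch with a hypothetical table:

```java
// A grouping-free COUNT(*): if the source accepts the push-down, the result
// comes from snapshot/statistics metadata rather than a full file scan.
tEnv.executeSql("SELECT COUNT(*) FROM orders").print();
```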
public String asSummaryString()
Specified by: asSummaryString in interface org.apache.flink.table.connector.source.DynamicTableSource
public boolean isUnbounded()
Specified by: isUnbounded in class FlinkTableSource
public Optional<SupportsLookupCustomShuffle.InputDataPartitioner> getPartitioner()
Description copied from interface: SupportsLookupCustomShuffle
This method is used to retrieve a custom partitioner that will be applied to the input stream of the lookup-join node.
Specified by: getPartitioner in interface SupportsLookupCustomShuffle
Returns: a SupportsLookupCustomShuffle.InputDataPartitioner that defines how records should be distributed across the different subtasks. If the connector expects the input data to remain in its original distribution, an Optional.empty() should be returned.

Copyright © 2023–2025 The Apache Software Foundation. All rights reserved.